Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug  22
-rw-r--r--  drivers/gpu/drm/i915/Makefile  26
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c  899
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h  24
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c  21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  80
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c  29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c  325
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.h  22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c  806
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.h  17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c  244
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  4570
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h  21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c  133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c  133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h  37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  202
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  2766
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c  692
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h  18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c  404
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c  178
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c  49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c  1363
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h  23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c  23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c  21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c  683
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h  22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c  300
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c  278
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c  162
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.h  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c  552
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c  1406
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.h  52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  143
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c  148
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c  209
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h  33
-rw-r--r--  drivers/gpu/drm/i915/dma_resv_utils.c  17
-rw-r--r--  drivers/gpu/drm/i915/dma_resv_utils.h  13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c  132
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c  113
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c  59
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c  24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c  15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c  112
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h  43
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c  9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h  19
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c  51
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c  4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c  41
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c  52
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c  28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c  75
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c  12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c  54
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c  2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c  22
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c  4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c  1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c  1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt_pm.c  27
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c  15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c  11
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c  635
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.h  127
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  59
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.h  17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h  7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h  11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h  25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h  93
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c  167
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c  59
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c  22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_stats.h  60
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h  22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c  3896
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h  47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c  18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c  22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c  12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c  197
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h  8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c  49
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c  7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h  25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c  29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c  5415
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h  167
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h  41
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c (renamed from drivers/gpu/drm/i915/intel_region_lmem.c)  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.h (renamed from drivers/gpu/drm/i915/intel_region_lmem.h)  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c  101
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c  272
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c  61
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c  103
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h  25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c  646
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c  15
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_pm.c  203
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c  4741
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c  8
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c  173
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c  4701
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c  80
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c  28
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c  5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c  14
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c  196
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c  16
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h  7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  457
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c  54
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c  7
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  336
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.h  5
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h  6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h  11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h  41
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  22
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.h  5
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h  12
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  24
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  5
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c  7
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  763
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  118
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  132
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h  9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  25
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  475
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h  3
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c  7
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c  17
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c  95
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h  35
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  74
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  178
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h  10
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c  32
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h  7
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler_types.h  10
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  33
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c  25
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h  7
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  15
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c  159
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h  4
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c  136
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h  13
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c  39
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  1108
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h  7
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h  6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c  28
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_perf.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  9
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c  4
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c  110
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_region.c  19
237 files changed, 24949 insertions, 20815 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 25cd9788a4d5..be76054c01d8 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -31,10 +31,13 @@ config DRM_I915_DEBUG
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
+ select DRM_I915_WERROR
+ select DRM_I915_DEBUG_GEM
+ select DRM_I915_DEBUG_GEM_ONCE
+ select DRM_I915_DEBUG_MMIO
+ select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_DEBUG_RUNTIME_PM
- select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -69,6 +72,21 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_DEBUG_GEM_ONCE
+ bool "Make a GEM debug failure fatal"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ During development, we often only want the very first failure
+ as that would otherwise be lost in the deluge of subsequent
+ failures. However, more casual testers may not want to trigger
+ a hard BUG_ON and hope that the system remains sufficiently usable
+ to capture a bug report in situ.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
default n
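
The new DRM_I915_DEBUG_GEM_ONCE option trades survivability for signal: the first GEM debug failure becomes fatal before follow-on failures can bury it, while without it the checks merely warn so a tester's machine stays usable enough to capture a report. A minimal sketch of that split, assuming a hypothetical gem_check() macro rather than the driver's actual GEM debug plumbing:

    #include <linux/bug.h>

    /* Hypothetical helper, not the i915 macros themselves. */
    #ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
    /* Developer mode: halt on the very first failure. */
    #define gem_check(cond)  BUG_ON(cond)
    #else
    /* Tester mode: log the failure and keep the system usable. */
    #define gem_check(cond)  WARN_ON(cond)
    #endif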
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6d9e81ea67f4..32bd1fdffffe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += i915_drv.o \
# core library code
i915-y += \
+ dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -83,6 +84,7 @@ gt-y += \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
+ gt/gen8_engine_cs.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
@@ -92,6 +94,7 @@ gt-y += \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
+ gt/intel_execlists_submission.o \
gt/intel_ggtt.o \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
@@ -107,6 +110,7 @@ gt-y += \
gt/intel_mocs.o \
gt/intel_ppgtt.o \
gt/intel_rc6.o \
+ gt/intel_region_lmem.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ring.o \
@@ -132,6 +136,7 @@ gem-y += \
gem/i915_gem_clflush.o \
gem/i915_gem_client_blt.o \
gem/i915_gem_context.o \
+ gem/i915_gem_create.o \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
@@ -167,7 +172,6 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
- intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -197,13 +201,17 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_crtc.o \
display/intel_csr.o \
+ display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
+ display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
display/intel_fbc.o \
+ display/intel_fdi.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
@@ -215,7 +223,8 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
- display/intel_vga.o
+ display/intel_vga.o \
+ display/i9xx_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -234,6 +243,7 @@ i915-y += \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_dp.o \
+ display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
@@ -247,9 +257,11 @@ i915-y += \
display/intel_lspcon.o \
display/intel_lvds.o \
display/intel_panel.o \
+ display/intel_pps.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
+ display/intel_vrr.o \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
@@ -284,15 +296,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
# exclude some broken headers from the test coverage
no-header-test := \
- display/intel_vbt_defs.h \
- gvt/execlist.h \
- gvt/fb_decoder.h \
- gvt/gtt.h \
- gvt/gvt.h \
- gvt/interrupt.h \
- gvt/mmio_context.h \
- gvt/mpt.h \
- gvt/scheduler.h
+ display/intel_vbt_defs.h
extra-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 000000000000..d30374df67f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 dspcntr;
+
+ dspcntr = DISPLAY_PLANE_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dspcntr |= DISPPLANE_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ if (rotation & DRM_MODE_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_MODE_REFLECT_X) {
+ src_x += src_w - 1;
+ }
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+ } else if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+ return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 linear_offset;
+ int x = plane_state->color_plane[0].x;
+ int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ unsigned long irqflags;
+ u32 dspaddr_offset;
+ u32 dspcntr;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ dspaddr_offset = plane_state->color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
+
+ if (INTEL_GEN(dev_priv) < 4) {
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
+ */
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway so that the crtc state readout works correctly.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ if (async_flip)
+ dspcntr |= DISPPLANE_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 32 * 1024);
+}
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 32 * 1024);
+ else
+ return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 3) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ if (plane->i9xx_plane == PLANE_C)
+ return 4*1024;
+ else
+ return 8*1024;
+ }
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port is hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ else
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
+ } else {
+ formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane->max_stride = i965_plane_max_stride;
+ else
+ plane->max_stride = i9xx_plane_max_stride;
+ } else {
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->max_stride = hsw_primary_max_stride;
+ else
+ plane->max_stride = ilk_primary_max_stride;
+ }
+
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_disable_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c",
+ plane_name(plane->i9xx_plane));
+ if (ret)
+ goto fail;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ } else {
+ supported_rotations = DRM_MODE_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
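
To make the 64bpp bandwidth rule in i9xx_plane_ratio()/i9xx_plane_min_cdclk() above concrete: cpp == 8 gives num/den = 10/8, i.e. cdclk must be at least 1.25x the pixel rate, which is the same statement as "pixel rate may not exceed 80% of cdclk". A self-contained sketch of the arithmetic (the 148500 kHz figure is illustrative, not from the patch):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Mirrors i9xx_plane_ratio(): 64bpp needs cdclk >= 10/8 of pixel rate. */
    static unsigned int min_cdclk_khz(unsigned int pixel_rate_khz,
                                      unsigned int cpp, int double_wide)
    {
            unsigned int num = cpp == 8 ? 10 : 1;
            unsigned int den = cpp == 8 ? 8 : 1;

            if (double_wide) /* two pixels per clock */
                    den *= 2;

            return DIV_ROUND_UP(pixel_rate_khz * num, den);
    }

    int main(void)
    {
            /* Illustrative 1080p60-class pixel rate. */
            printf("fp16: %u kHz\n", min_cdclk_khz(148500, 8, 0)); /* 185625 */
            printf("xrgb: %u kHz\n", min_cdclk_khz(148500, 4, 0)); /* 148500 */
            return 0;
    }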
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 000000000000..ca963c2a8457
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+#endif
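
A worked example of the stride limits declared here (and defined in i9xx_plane.c above): for X-tiled XRGB8888 (cpp == 4), i965_plane_max_stride() returns min(4096 * 4, 16 * 1024) = 16384 bytes, so the 4k-pixel TILEOFF.x guarantee is what binds; a linear framebuffer gets the full 32 KiB register limit instead. A standalone restatement of that arithmetic:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            int cpp = 4; /* XRGB8888 */

            /* X-tiled: cap at 4k pixels so TILEOFF.x stays representable. */
            printf("tiled:  %d\n", MIN(4096 * cpp, 16 * 1024)); /* 16384 */
            /* Linear: only the 32 KiB stride register limit applies. */
            printf("linear: %d\n", 32 * 1024); /* 32768 */
            return 0;
    }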
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index b3533a32f8ba..9d245a689323 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
vdsc_cfg->convert_rgb = true;
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7e9f84b00859..4683f98f7e54 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
int ret;
intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ new_crtc_state->enabled_planes &= ~BIT(plane->id);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -449,7 +452,7 @@ void intel_update_plane(struct intel_plane *plane,
trace_intel_update_plane(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
- plane->async_flip(plane, crtc_state, plane_state);
+ plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_plane(plane, crtc_state, plane_state);
}
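
The enabled_planes hunk above keeps a per-crtc bitmask consistent with the outcome of the plane check: the plane's bit is cleared unconditionally, then set again only once the plane is known to remain attached to a crtc. A hedged sketch of that invariant (the helper is illustrative, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Clear first, then set iff the plane ends up enabled on the crtc. */
    static void track_enabled_plane(uint32_t *enabled_planes,
                                    unsigned int plane_id, bool enabled)
    {
            *enabled_planes &= ~(UINT32_C(1) << plane_id);
            if (enabled)
                    *enabled_planes |= UINT32_C(1) << plane_id;
    }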
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4cc949b228f2..987cf509337f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
@@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
+ } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->dsc.slice_count);
/*
- * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
- * implementation specific physical rate buffer size. Currently we use
- * the required rate buffer model size calculated in
- * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
- *
* The VBT rc_buffer_block_size and rc_buffer_size definitions
- * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
- * implementation should also use the DPCD (or perhaps VBT for eDP)
- * provided value for the buffer size.
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
*/
+ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+ dsc->rc_buffer_size);
/* FIXME: DSI spec says bpc + 1 for this one */
vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
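
fill_dsc() now derives rc_model_size from the VBT fields that mirror DP 1.4 DPCD offsets 0x62 and 0x63. Per those DPCD definitions, the block-size field selects 1/4/16/64 KiB blocks and the size field counts (value + 1) blocks; a hedged recomputation of that encoding (not the source of the drm_dsc_dp_rc_buffer_size() helper itself):

    #include <stdio.h>

    /* DPCD 0x62 block-size encoding: 0 -> 1 KiB ... 3 -> 64 KiB. */
    static int rc_buffer_bytes(unsigned int block_size_field,
                               unsigned int size_field)
    {
            static const int block_bytes[] = { 1024, 4096, 16384, 65536 };

            if (block_size_field > 3)
                    return 0;

            /* DPCD 0x63 counts (value + 1) blocks. */
            return block_bytes[block_size_field] * (int)(size_field + 1);
    }

    int main(void)
    {
            /* e.g. 1 KiB blocks, size field 7 -> 8 KiB rate buffer. */
            printf("%d bytes\n", rc_buffer_bytes(0, 7)); /* 8192 */
            return 0;
    }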
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bd060404d249..4b5a30ac84bc 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -20,76 +20,9 @@ struct intel_qgv_point {
struct intel_qgv_info {
struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
- u8 num_channels;
u8 t_bl;
- enum intel_dram_type dram_type;
};
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
- struct intel_qgv_info *qi)
-{
- u32 val = 0;
- int ret;
-
- ret = sandybridge_pcode_read(dev_priv,
- ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
- &val, NULL);
- if (ret)
- return ret;
-
- if (IS_GEN(dev_priv, 12)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- case 4:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 5:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else if (IS_GEN(dev_priv, 11)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else {
- MISSING_CASE(INTEL_GEN(dev_priv));
- qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
- }
-
- qi->num_channels = (val & 0xf0) >> 4;
- qi->num_points = (val & 0xf00) >> 8;
-
- if (IS_GEN(dev_priv, 12))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
- else if (IS_GEN(dev_priv, 11))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
- return 0;
-}
-
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
- ret = icl_pcode_read_mem_global_info(dev_priv, qi);
- if (ret)
- return ret;
+ qi->num_points = dram_info->num_qgv_points;
+
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels;
+ int num_channels = dev_priv->dram_info.num_channels;
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- num_channels = qi.num_channels;
deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c449d28d0560..2e878cc274b7 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv)
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
*/
- I915_WRITE(PCH_RAWCLK_FREQ,
- CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
return 38400;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 172d398081ee..ff7dcb7088bf 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1485,6 +1485,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
int ret;
@@ -1492,6 +1493,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
(crtc_state->hw.gamma_lut ||
crtc_state->hw.degamma_lut) &&
@@ -1525,12 +1533,20 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index d5ad61e4083e..996ae0608a62 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy))
- drm_warn(&dev_priv->drm,
- "Combo PHY %c HW state changed unexpectedly\n",
- phy_name(phy));
+ !icl_combo_phy_verify_state(dev_priv, phy)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ /*
+ * A known problem with old ifwi:
+ * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+ * Suppress the warning for CI. Remove ASAP!
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ } else {
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ }
+ }
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 406e96785c76..d5ceb7bdc14b 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
}
void
-intel_attach_colorspace_property(struct drm_connector *connector)
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- if (drm_mode_create_hdmi_colorspace_property(connector))
- return;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (drm_mode_create_dp_colorspace_property(connector))
- return;
- break;
- default:
- MISSING_CASE(connector->connector_type);
- return;
- }
+ if (!drm_mode_create_hdmi_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_dp_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
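
With the split above, each connector type calls its matching helper at init time instead of switching on connector_type inside one function. A hedged sketch of the resulting call sites (the surrounding init functions are paraphrased, not quoted from the patch):

    /* In the HDMI connector init path: */
    intel_attach_hdmi_colorspace_property(connector);

    /* In the DP/eDP connector init path: */
    intel_attach_dp_colorspace_property(connector);

Each helper creates the colorspace property and attaches it only when creation succeeds, which is what the old per-type switch did.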
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..661a37a3c6d8 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 000000000000..57b0a3ebe908
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
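+	/*
+	 * If there is no usable hw frame counter (max_vblank_count == 0),
+	 * fall back to the accurate software vblank count.
+	 */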
+ if (!vblank->max_vblank_count)
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 mode_flags = crtc->mode_flags;
+
+	/*
+	 * From Gen 11, in case of DSI cmd mode, the frame counter won't
+	 * have been updated at the beginning of TE. If we wanted to use
+	 * the hw counter, we would only find it updated at the next TE,
+	 * hence switch to the sw counter.
+	 */
+ if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ return 0;
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (INTEL_GEN(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ assert_vblank_disabled(&crtc->base);
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
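+	/* Default probed fields to "invalid" so readout/compute must fill them in */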
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(intel_crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+ intel_crtc_debugfs_add(crtc);
+ return 0;
+}
+
+#define INTEL_CRTC_FUNCS \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources, \
+ .late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
+ int sprite, ret;
+
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(primary->id);
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(plane->id);
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
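+	/*
+	 * Pick the vblank hooks matching how this platform exposes its
+	 * hw frame counter and vblank interrupts.
+	 */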
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ &primary->base, &cursor->base,
+ funcs, "pipe %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+ dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+ BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ drm_crtc_create_scaling_filter_property(&crtc->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_color_init(crtc);
+
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ return 0;
+
+fail:
+ intel_crtc_free(crtc);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 000000000000..08112d557411
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 000000000000..21fe4d2753e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
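+	/*
+	 * Some old platforms scan the cursor out of physically contiguous
+	 * memory; everything else takes a GGTT offset.
+	 */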
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ return base + plane_state->color_plane[0].offset;
+}
+
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
+ u32 pos = 0;
+
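+	/* CURPOS is sign-magnitude: a sign bit plus the absolute value */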
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_rect src = plane_state->uapi.src;
+ const struct drm_rect dst = plane_state->uapi.dst;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = src;
+ plane_state->uapi.dst = dst;
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ return CURSOR_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = drm_rect_width(&plane_state->uapi.dst);
+
+ /*
+	 * 845g/865g are only limited by the width of their cursors;
+ * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+	/* if we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
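+	/* The hw only accepts a limited set of power-of-two cursor pitches */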
+ switch (fb->pitches[0]) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
+ size = (height << 12) | width;
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/*
+	 * On these chipsets we can only modify the base/size/stride
+	 * whilst the cursor is disabled.
+	 */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ *pipe = PIPE_A;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 cntl = 0;
+
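+	/* On gen11+ these bits are not used, so there is nothing to set up */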
+ if (INTEL_GEN(dev_priv) >= 11)
+ return cntl;
+
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
+ case 64:
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+ return 0;
+ }
+
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+ cntl |= MCURSOR_ROTATE_180;
+
+ return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
+
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
+
+ return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+	/* if we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
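+	/* The cursor pitch must match the visible cursor width exactly */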
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+	/*
+	 * There's something wrong with the cursor on CHV pipe C.
+	 * If it straddles the left edge of the screen then
+	 * moving it away from the edge or disabling it often
+	 * results in a pipe underrun, and often that can lead to
+	 * a dead pipe (constant underrun reported, and it scans
+	 * out just a solid color). To recover from that, the
+	 * display power well must be turned off and on again.
+	 * Refuse to put the cursor into that compromised position.
+	 */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
+
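+		/* CUR_FBC_CTL declares a non-square cursor height as lines - 1 */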
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+	 * Without the CURCNTR write, the CURPOS write would
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
+ *
+ * On other platforms CURPOS always requires the
+	 * CURBASE write to arm the update. Additionally,
+	 * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
+ *
+ * The other registers are armed by the CURBASE write
+	 * except when the plane is getting enabled, at which time
+ * the CURCNTR write arms the update.
+ */
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ if (HAS_CUR_FBC(dev_priv))
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
+}
+
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *new_crtc_state;
+ int ret;
+
+ /*
+	 * When the crtc is inactive or there is a modeset pending,
+	 * wait for it to complete in the slowpath.
+ *
+ * FIXME bigjoiner fastpath would be good
+ */
+ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+ crtc_state->update_pipe || crtc_state->bigjoiner)
+ goto slow;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+ goto slow;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
+ goto slow;
+
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+ if (ret)
+ goto out_free;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ goto out_free;
+
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ plane->base.state = &new_plane_state->uapi;
+
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
+ if (new_plane_state->uapi.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, crtc_state);
+
+ intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+ if (ret)
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+ else
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+ return ret;
+
+slow:
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_plane *cursor;
+ int ret, zpos;
+
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
+
+ cursor->pipe = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+ cursor->id = PLANE_CURSOR;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
+ cursor->update_plane = i845_update_cursor;
+ cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->max_stride = i9xx_cursor_max_stride;
+ cursor->update_plane = i9xx_update_cursor;
+ cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
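+	/*
+	 * Seed the cached register values with an impossible value so
+	 * the first update always writes the hardware.
+	 */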
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
+
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+ return cursor;
+
+fail:
+ intel_plane_free(cursor);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 000000000000..ce333bf4c2d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index dc13d1814d95..1bb40ec5fe5d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -46,10 +46,12 @@
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -611,6 +613,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
@@ -766,6 +796,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
};
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1093,6 +1151,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
} else if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1259,7 +1323,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
} else {
@@ -1267,8 +1334,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
return tgl_combo_phy_ddi_translations_dp_hbr2;
}
} else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
}
}
@@ -2029,9 +2101,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
}
}
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable)
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2046,9 +2118,9 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (enable)
- tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ tmp |= hdcp_mask;
else
- tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ tmp &= ~hdcp_mask;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2285,18 +2357,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(dev_priv, phy))
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
}
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
@@ -2626,15 +2703,11 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- if (!ddi_translations)
- return;
- if (level >= n_entries) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 1);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2758,12 +2831,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
return;
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- if (level >= n_entries) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 1);
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- }
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -2898,7 +2970,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- if (level >= n_entries)
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
@@ -3485,6 +3559,22 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->vrr.enable)
+ return;
+
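+	/*
+	 * With VRR the transmitted MSA vertical timings no longer match
+	 * the actual timings, so ask the sink to ignore them.
+	 */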
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+ enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
+ enable ? "enable" : "disable");
+}
+
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3512,12 +3602,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
val |= DP_TP_CTL_FEC_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
-
- if (intel_de_wait_for_set(dev_priv,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -3578,7 +3662,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 2. Enable Panel Power if PPS is required */
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
/*
* 3. For non-TBT Type-C ports, set FIA lane count
@@ -3599,9 +3683,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3658,6 +3744,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -3666,6 +3753,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
/*
* 7.i Follow DisplayPort specification training sequence (see notes for
* failure handling)
@@ -3708,14 +3798,16 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3786,7 +3878,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3939,13 +4033,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 12)
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
+ intel_pps_vdd_on(intel_dp);
+ intel_pps_off(intel_dp);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
}
@@ -3966,8 +4061,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
@@ -3989,6 +4085,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_disable_pipe(old_crtc_state);
+ intel_vrr_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -4040,8 +4138,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
icl_unmap_plls_to_ports(encoder);
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port),
+ fetch_and_zero(&dig_port->aux_wakeref));
if (is_tc_port)
intel_tc_port_put_link(dig_port);
@@ -4126,6 +4225,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4133,7 +4233,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state, conn_state);
- intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -4237,6 +4340,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!crtc_state->bigjoiner_slave)
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -4250,7 +4355,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -4273,6 +4378,9 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
false);
+ /* Disable Ignore_MSA bit in DP Sink */
+ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
@@ -4378,9 +4486,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
if (is_tc_port)
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
/*
@@ -4593,6 +4704,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4667,9 +4779,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->fec_enable);
}
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ pipe_config->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ else
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -4956,6 +5071,8 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
+ if (dig_port)
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
@@ -5109,12 +5226,20 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
enum phy phy = intel_port_to_phy(i915, encoder->port);
bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5272,8 +5397,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
return i915->hti_state & HDPORT_ENABLED &&
- (i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
- i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+ i915->hti_state & HDPORT_DDI_USED(phy);
}
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index dcc711cfe4fe..a4dd815c0000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -50,9 +50,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
u32 ddi_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 61be6bed9162..9843a0f202a0 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -45,8 +45,10 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
@@ -56,6 +58,9 @@
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
@@ -67,10 +72,12 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
+#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -79,72 +86,14 @@
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for ivb (no fp16 due to hw issue) */
-static const u32 ivb_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
-};
-
-/* Primary plane formats for gen >= 4, except ivb */
-static const u32 i965_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-/* Primary plane formats for vlv/chv */
-static const u32 vlv_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-static const u64 i9xx_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
+#include "i9xx_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -171,18 +120,6 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
-
-struct intel_limit {
- struct {
- int min, max;
- } dot, vco, n, m, m1, m2, p, p1;
-
- struct {
- int dot_limit;
- int p2_slow, p2_fast;
- } p2;
-};
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -241,281 +178,6 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-/* units of 100MHz */
-static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3},
- .p2 = { .dot_limit = 270000,
- .p2_slow = 10,
- .p2_fast = 10
- },
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
- .dot = { .min = 22000, .max = 400000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 16, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8},
- .p2 = { .dot_limit = 165000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
- .dot = { .min = 20000, .max = 115000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 14, .p2_fast = 14
- },
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
- .dot = { .min = 80000, .max = 224000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 7, .p2_fast = 7
- },
-};
-
-static const struct intel_limit pnv_limits_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
- /* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- /* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit pnv_limits_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit ilk_limits_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 5 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit ilk_limits_single_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 118 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 56 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100mhz refclk limits. */
-static const struct intel_limit ilk_limits_single_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 2 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 270000 * 5 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p1 = { .min = 2, .max = 3 },
- .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4800000, .max = 6480000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- .m2 = { .min = 24 << 22, .max = 175 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
- /* FIXME: find real dot limits */
- .dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6700000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- /* FIXME: find real m2 limits */
- .m2 = { .min = 2 << 22, .max = 255 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -542,12 +204,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct intel_crtc_state *state)
-{
- return drm_atomic_crtc_needs_modeset(&state->uapi);
-}
-
-static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
@@ -566,482 +222,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m2 + 2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
- return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = i9xx_dpll_compute_m(clock);
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
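Since these helpers move to intel_dpll.c verbatim, a worked instance of the i9xx clock equation may help readers following the move; the divisor values below are hypothetical, picked only to satisfy intel_limits_i9xx_sdvo:

/* refclk = 96000 kHz, m1 = 12, m2 = 5, n = 1, p1 = 2, p2 = 10 */
struct dpll clock = { .m1 = 12, .m2 = 5, .n = 1, .p1 = 2, .p2 = 10 };
int dot = i9xx_calc_dpll_params(96000, &clock);
/*
 * clock.m   = 5 * (12 + 2) + (5 + 2) = 77      (within 70..120)
 * clock.vco = 96000 * 77 / (1 + 2)   = 2464000 (within 1400000..2800000)
 * clock.p   = 2 * 10                 = 20      (within 5..80)
 * clock.dot = 2464000 / 20           = 123200 kHz, i.e. a ~123 MHz dot clock
 */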
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
- clock->n << 22);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
- const struct intel_limit *limit,
- const struct dpll *clock)
-{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- return false;
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- return false;
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- return false;
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- return false;
-
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
- if (clock->m1 <= clock->m2)
- return false;
-
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)) {
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- return false;
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- return false;
- }
-
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- return false;
- /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
- * connector, etc., rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- return false;
-
- return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
- const struct intel_crtc_state *crtc_state,
- int target)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev_priv))
- return limit->p2.p2_fast;
- else
- return limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- return limit->p2.p2_slow;
- else
- return limit->p2.p2_fast;
- }
-}
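A small worked example of the p2 selection above, against the intel_limits_i9xx_sdvo table (dot_limit = 200000) with hypothetical non-LVDS targets:

/*
 * target = 123200 kHz -> below dot_limit    -> p2 = p2_slow = 10
 * target = 250000 kHz -> at/above dot_limit -> p2 = p2_fast = 5
 * LVDS ignores the dot limit entirely: dual-link picks p2_fast,
 * single-link picks p2_slow.
 */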
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- if (clock.m2 >= clock.m1)
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int max_n;
- bool found = false;
- /* approximately equals target * 0.00585 */
- int err_most = (target >> 8) + (target >> 9);
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- max_n = limit->n.max;
- /* based on hardware requirement, prefer smaller n over precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirement, prefer larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- for (clock.p1 = limit->p1.max;
- clock.p1 >= limit->p1.min; clock.p1--) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err_most) {
- *best_clock = clock;
- err_most = this_err;
- max_n = clock.n;
- found = true;
- }
- }
- }
- }
- }
- return found;
-}
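A quick check of the 0.00585 figure in the err_most comment above, with a hypothetical target:

/*
 * target = 270000 kHz:
 *   err_most = (270000 >> 8) + (270000 >> 9) = 1054 + 527 = 1581
 *   270000 * 0.00585 ~= 1580, so the stated approximation holds
 */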
-
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const struct dpll *calculated_clock,
- const struct dpll *best_clock,
- unsigned int best_error_ppm,
- unsigned int *error_ppm)
-{
- /*
- * For CHV ignore the error and consider only the P value.
- * Prefer a bigger P value based on HW requirements.
- */
- if (IS_CHERRYVIEW(to_i915(dev))) {
- *error_ppm = 0;
-
- return calculated_clock->p > best_clock->p;
- }
-
- if (drm_WARN_ON_ONCE(dev, !target_freq))
- return false;
-
- *error_ppm = div_u64(1000000ULL *
- abs(target_freq - calculated_clock->dot),
- target_freq);
- /*
- * Prefer a better P value over a better (smaller) error if the error
- * is small. Ensure this preference for future configurations too by
- * setting the error to 0.
- */
- if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
- *error_ppm = 0;
-
- return true;
- }
-
- return *error_ppm + 10 < best_error_ppm;
-}
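A numeric sketch of the ppm comparison above, with hypothetical clocks:

/*
 * target_freq = 270000 kHz, calculated dot = 269946 kHz:
 *   error_ppm = 1000000 * 54 / 270000 = 200 ppm
 * 200 is not under 100, so a bigger P alone does not win here; the
 * candidate is accepted only if 200 + 10 < best_error_ppm.
 */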
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct dpll clock;
- unsigned int bestppm = 1000000;
- /* min update 19.2 MHz */
- int max_n = min(limit->n.max, refclk / 19200);
- bool found = false;
-
- target *= 5; /* fast clock */
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- /* based on hardware requirement, prefer smaller n over precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- clock.p = clock.p1 * clock.p2;
- /* based on hardware requirement, prefer bigger m1,m2 values */
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm;
-
- clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
- refclk * clock.m1);
-
- vlv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target,
- &clock,
- best_clock,
- bestppm, &ppm))
- continue;
-
- *best_clock = clock;
- bestppm = ppm;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- unsigned int best_error_ppm;
- struct dpll clock;
- u64 m2;
- bool found = false;
-
- memset(best_clock, 0, sizeof(*best_clock));
- best_error_ppm = 1000000;
-
- /*
- * Based on the hardware doc, n is always set to 1 and m1 is always
- * set to 2. If we ever need to support a 200MHz refclk, we will
- * have to revisit this because n may no longer be 1.
- */
- clock.n = 1, clock.m1 = 2;
- target *= 5; /* fast clock */
-
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast;
- clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- unsigned int error_ppm;
-
- clock.p = clock.p1 * clock.p2;
-
- m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
- refclk * clock.m1);
-
- if (m2 > INT_MAX/clock.m1)
- continue;
-
- clock.m2 = m2;
-
- chv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
- best_error_ppm, &error_ppm))
- continue;
-
- *best_clock = clock;
- best_error_ppm = error_ppm;
- found = true;
- }
- }
-
- return found;
-}
-
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_bxt;
-
- return chv_find_best_dpll(limit, crtc_state,
- crtc_state->port_clock, refclk,
- NULL, best_clock);
-}
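bxt_find_best_dpll() reuses the CHV search with the BXT limits and a fixed 100 MHz reference clock; a hypothetical caller might look like this (the error handling is illustrative, not taken from this file):

struct dpll best_clock;

/* crtc_state->port_clock is assumed to have been set by the encoder */
if (!bxt_find_best_dpll(crtc_state, &best_clock))
	return -EINVAL; /* no divisors satisfy intel_limits_bxt */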
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1315,12 +495,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
- if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
- drm_crtc_vblank_put(crtc);
-}
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1672,7 +846,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -1683,7 +857,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in transcoder be consistent with
@@ -1728,7 +902,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1805,55 +979,6 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
-
- /*
- * From Gen 11, in case of DSI cmd mode, the frame counter won't
- * have updated at the beginning of TE. If we want to use the hw
- * counter, we would find it updated only at the next TE, hence
- * the switch to the sw counter.
- */
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
- return 0;
-
- /*
- * On i965gm the hardware frame counter reads
- * zero when the TV encoder is enabled :(
- */
- if (IS_I965GM(dev_priv) &&
- (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
- return 0;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return 0xffffffff; /* full 32 bit counter */
- else if (INTEL_GEN(dev_priv) >= 3)
- return 0xffffff; /* only 24 bits of frame count */
- else
- return 0; /* Gen2 doesn't have a hardware frame counter */
-}
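Summarizing the branches above (all values taken from the code being moved):

/*
 * gen11+ DSI command mode (TE0/TE1) -> 0          (use the sw counter)
 * i965gm with TV-out enabled        -> 0
 * gen5+ or g4x                      -> 0xffffffff (full 32 bit counter)
 * gen3/gen4                         -> 0xffffff   (24 bit counter)
 * gen2                              -> 0          (no hw frame counter)
 */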
-
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- assert_vblank_disabled(&crtc->base);
- drm_crtc_set_max_vblank_count(&crtc->base,
- intel_crtc_max_vblank_count(crtc_state));
- drm_crtc_vblank_on(&crtc->base);
-}
-
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- drm_crtc_vblank_off(&crtc->base);
- assert_vblank_disabled(&crtc->base);
-}
-
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1968,8 +1093,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
static bool is_gen12_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-
}
static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,6 +1102,12 @@ static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
if (is_ccs_modifier(fb->modifier))
@@ -1998,6 +1129,9 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2016,7 +1150,7 @@ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier)
+ u64 modifier)
{
return info->is_yuv &&
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
@@ -2048,6 +1182,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
@@ -2182,6 +1317,11 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
return 0;
}
+static bool has_async_flips(struct drm_i915_private *i915)
+{
+ return INTEL_GEN(i915) >= 5;
+}
+
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane)
{
@@ -2196,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (INTEL_GEN(dev_priv) >= 9)
+ if (has_async_flips(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -2204,6 +1344,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return intel_tile_row_size(fb, color_plane);
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -2529,9 +1670,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
return offset_aligned;
}
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
@@ -2605,6 +1746,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
@@ -2683,6 +1825,25 @@ static const struct drm_format_info gen12_ccs_formats[] = {
.hsub = 2, .vsub = 2, .is_yuv = true },
};
+/*
+ * Same as gen12_ccs_formats[] above, but with additional surface used
+ * to pass Clear Color information in plane 2 with 64 bits of data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
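A rough sketch of how a RC_CCS_CC framebuffer is laid out, inferred from the format table above and the offset check added later in intel_fill_fb_info(); the plane 1 description is an assumption about the control surface, not spelled out in this patch:

/*
 * plane 0: main Y-tiled color surface, 4 bytes per pixel
 * plane 1: render compression control (CCS) surface
 * plane 2: 64-bit clear color value; consumed by the driver, never
 *          handed to the display engine, and fb->offsets[2] must be
 *          PAGE_SIZE aligned
 */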
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2711,6 +1872,10 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return lookup_format_info(gen12_ccs_cc_formats,
+ ARRAY_SIZE(gen12_ccs_cc_formats),
+ cmd->pixel_format);
default:
return NULL;
}
@@ -2719,6 +1884,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -2937,7 +2103,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
int ccs_x, ccs_y;
int main_x, main_y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
@@ -3064,6 +2230,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int x, y;
int ret;
+ /*
+ * Plane 2 of Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(fb, i)) {
+ if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
cpp = fb->format->cpp[i];
intel_fb_plane_dims(&width, &height, fb, i);
@@ -3268,7 +2446,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
}
}
-static int
+int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
@@ -3548,7 +2726,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -3558,11 +2736,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
* have been used on the same (or wrong) pipe. plane_mask uses
* unique ids, hence we can use that to reconstruct active_planes.
*/
+ crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->uapi.plane_mask)
+ crtc_state->uapi.plane_mask) {
+ crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+ }
}
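For example (hypothetical plane set), a CRTC whose uapi.plane_mask holds the primary and cursor planes is rebuilt by the loop above as:

/* enabled_planes == active_planes == BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR) */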
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
@@ -3580,7 +2761,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -3610,12 +2791,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
-static struct intel_frontbuffer *
-to_intel_frontbuffer(struct drm_framebuffer *fb)
-{
- return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
-}
-
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3814,33 +2989,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
return INT_MAX;
}
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- int aux_plane = intel_main_to_aux_plane(fb, 0);
- u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- u32 alignment, offset;
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(fb, 0);
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
@@ -3849,9 +3010,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* main surface offset, and it must be non-negative. Make
* sure that is what we will get.
*/
- if (aux_plane && offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, aux_offset & ~(alignment - 1));
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@@ -3862,18 +3024,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((x + w) * cpp > plane_state->color_plane[0].stride) {
- if (offset == 0) {
+ while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+ if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
}
}
+ return 0;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
@@ -3896,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[0].offset = offset;
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
@@ -3968,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[uv_plane].offset = offset;
plane_state->color_plane[uv_plane].x = x;
plane_state->color_plane[uv_plane].y = y;
@@ -3988,7 +3187,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -4060,422 +3260,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int cpp = fb->format->cpp[0];
-
- /*
- * g4x bspec says 64bpp pixel rate can't exceed 80%
- * of cdclk when the sprite plane is enabled on the
- * same pipe. ilk/snb bspec says 64bpp pixel rate is
- * never allowed to exceed 80% of cdclk. Let's just go
- * with the ilk/snb limit always.
- */
- if (cpp == 8) {
- *num = 10;
- *den = 8;
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- unsigned int pixel_rate;
- unsigned int num, den;
-
- /*
- * Note that crtc_state->pixel_rate accounts for both
- * horizontal and vertical panel fitter downscaling factors.
- * Pre-HSW bspec tells us to only consider the horizontal
- * downscaling factor here. We ignore that and just consider
- * both for simplicity.
- */
- pixel_rate = crtc_state->pixel_rate;
-
- i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock with double wide pipe */
- if (crtc_state->double_wide)
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
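The 64bpp rule above in numbers, with a hypothetical pixel rate:

/*
 * pixel_rate = 100000 kHz, cpp = 8 -> num/den = 10/8
 *   min cdclk = DIV_ROUND_UP(100000 * 10, 8)  = 125000 kHz
 * with a double wide pipe den doubles:
 *   min cdclk = DIV_ROUND_UP(100000 * 10, 16) = 62500 kHz
 */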
-
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- if (!HAS_GMCH(dev_priv)) {
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 16*1024;
- else
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 8*1024;
- else
- return 16*1024;
- } else {
- if (plane->i9xx_plane == PLANE_C)
- return 4*1024;
- else
- return 8*1024;
- }
-}
-
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dspcntr = 0;
-
- if (crtc_state->gamma_enable)
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5)
- dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
- return dspcntr;
-}
-
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 dspcntr;
-
- dspcntr = DISPLAY_PLANE_ENABLE;
-
- if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
- IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_XRGB1555:
- dspcntr |= DISPPLANE_BGRX555;
- break;
- case DRM_FORMAT_ARGB1555:
- dspcntr |= DISPPLANE_BGRA555;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRA888;
- break;
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBA888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRA101010;
- break;
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBA101010;
- break;
- case DRM_FORMAT_XBGR16161616F:
- dspcntr |= DISPPLANE_RGBX161616;
- break;
- default:
- MISSING_CASE(fb->format->format);
- return 0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_MODE_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (rotation & DRM_MODE_REFLECT_X)
- dspcntr |= DISPPLANE_MIRROR;
-
- return dspcntr;
-}
-
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x, src_y, src_w;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- /* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
- return -EINVAL;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
- else
- offset = 0;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- if (rotation & DRM_MODE_ROTATE_180) {
- src_x += src_w - 1;
- src_y += src_h - 1;
- } else if (rotation & DRM_MODE_REFLECT_X) {
- src_x += src_w - 1;
- }
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
-static bool i9xx_plane_has_windowing(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-
- if (IS_CHERRYVIEW(dev_priv))
- return i9xx_plane == PLANE_B;
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return false;
- else if (IS_GEN(dev_priv, 4))
- return i9xx_plane == PLANE_C;
- else
- return i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- int ret;
-
- ret = chv_plane_check_rotation(plane_state);
- if (ret)
- return ret;
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- i9xx_plane_has_windowing(plane));
- if (ret)
- return ret;
-
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 linear_offset;
- int x = plane_state->color_plane[0].x;
- int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- unsigned long irqflags;
- u32 dspaddr_offset;
- u32 dspcntr;
-
- dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->color_plane[0].stride);
-
- if (INTEL_GEN(dev_priv) < 4) {
- /*
- * PLANE_A doesn't actually have a full window
- * generator but let's assume we still need to
- * program whatever is there.
- */
- intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
- (y << 16) | x);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
- linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
- (y << 16) | x);
- }
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
- u32 dspcntr;
-
- /*
- * DSPCNTR pipe gamma enable on g4x+ and pipe csc
- * enable on ilk+ affect the pipe bottom color as
- * well, so we must configure them even if the plane
- * is disabled.
- *
- * On pre-g4x there is no way to gamma correct the
- * pipe bottom color but we'll keep on doing this
- * anyway so that the crtc state readout works correctly.
- */
- dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-4 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- ret = val & DISPLAY_PLANE_ENABLE;
-
- if (INTEL_GEN(dev_priv) >= 5)
- *pipe = plane->pipe;
- else
- *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4644,6 +3428,7 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return PLANE_CTL_TILED_Y |
@@ -4704,9 +3489,6 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
- if (crtc_state->uapi.async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
@@ -4995,532 +3777,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
- udelay(1000);
-
- /* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ilk_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, tries;
-
- /* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* Ironlake workaround, enable clock pointer after FDI enable */
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
-
-}
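Condensed, the ILK/Ibexpeak training sequence being moved out reads as follows; this is a summary of the code above, not new behavior:

/*
 * 1. unmask symbol_lock/bit_lock in FDI_RX_IMR
 * 2. enable CPU FDI TX + PCH FDI RX with training pattern 1,
 *    poll FDI_RX_IIR up to 5 times for FDI_RX_BIT_LOCK
 * 3. switch both sides to training pattern 2,
 *    poll up to 5 times for FDI_RX_SYMBOL_LOCK
 * 4. intel_fdi_normal_train() later returns the link to the normal pattern
 */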
-
-static const int snb_b_fdi_train_param[] = {
- FDI_LINK_TRAIN_400MV_0DB_SNB_B,
- FDI_LINK_TRAIN_400MV_6DB_SNB_B,
- FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
- FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, retry;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN(dev_priv, 6)) {
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- }
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
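gen6_fdi_link_train() layers two loops: an outer sweep over the four voltage-swing/pre-emphasis table entries and an inner five-poll wait, escaping the sweep as soon as any entry locks (retry < 5). A compact sketch of that control flow; try_lock_with() is hypothetical and the table values are placeholders:

#include <stdbool.h>
#include <stdio.h>

static const int train_param[] = { 0, 1, 2, 3 }; /* placeholder vswing entries */

static bool try_lock_with(int param, unsigned int retry)
{
	/* pretend only the third entry (value 2 here) ever achieves lock */
	return param == 2 && retry == 1;
}

int main(void)
{
	unsigned int i, retry;

	for (i = 0; i < 4; i++) {
		for (retry = 0; retry < 5; retry++) {
			if (try_lock_with(train_param[i], retry)) {
				printf("locked at vswing entry %u, retry %u\n",
				       i, retry);
				break;
			}
		}
		if (retry < 5) /* inner loop broke out early: we locked */
			break;
	}
	if (i == 4)
		fprintf(stderr, "train fail across all vswing entries\n");

	return 0;
}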
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, j;
-
- /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
- for the train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
-
- /* Try each vswing and preemphasis setting twice before moving on */
- for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
- /* disable first in case we need to retry */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[j/2];
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(1); /* should be 0.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done, level %i.\n",
- i);
- break;
- }
- udelay(1); /* should be 0.5us */
- }
- if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 fail on vswing %d\n", j / 2);
- continue;
- }
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(2); /* should be 1.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done, level %i.\n",
- i);
- goto train_done;
- }
- udelay(2); /* should be 1.5us */
- }
- if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 fail on vswing %d\n", j / 2);
- }
-
-train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
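The IVB variant walks ARRAY_SIZE(snb_b_fdi_train_param) * 2 attempts and indexes the table with j/2, so each vswing/pre-emphasis entry is tried twice before moving on. The mapping, spelled out:

#include <stdio.h>

int main(void)
{
	/* 4 table entries, each attempted twice: j/2 selects the entry */
	for (int j = 0; j < 4 * 2; j++)
		printf("attempt %d uses entry %d\n", j, j / 2);
	return 0;
}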
-static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
- }
-}
-
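ilk_fdi_pll_enable() is a chain of read-modify-write accesses, each followed by a posting read and a settle delay. A sketch of the idiom with a plain volatile array standing in for the register file; the field positions are examples only, not the real register layout:

#include <stdio.h>

static volatile unsigned int regs[1]; /* [0] stands in for "FDI_RX_CTL" */

static void rmw(int reg, unsigned int clear, unsigned int set)
{
	unsigned int temp = regs[reg]; /* read */

	temp = (temp & ~clear) | set;  /* modify */
	regs[reg] = temp;              /* write */
	(void)regs[reg];               /* posting read; the driver then udelay()s */
}

int main(void)
{
	rmw(0, 0x7u << 19, (2u - 1) << 19); /* e.g. program a 2-lane port width */
	rmw(0, 0, 1u << 13);                /* e.g. set a PLL-enable style bit */
	printf("ctl = 0x%08x\n", regs[0]);
	return 0;
}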
-static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
-static void ilk_fdi_disable(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
@@ -5751,7 +4007,7 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
*/
-static struct intel_encoder *
+struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@@ -6270,7 +4526,7 @@ static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
return (PS_FILTER_PROGRAMMED |
@@ -6469,7 +4725,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6496,7 +4752,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6549,7 +4805,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
new_crtc_state->active_planes;
}
@@ -6557,7 +4813,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return old_crtc_state->active_planes &&
- (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+ (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
static void intel_post_plane_update(struct intel_atomic_state *state,
@@ -6589,41 +4845,72 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
int i;
- for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
- u32 update_mask = new_crtc_state->update_planes;
- u32 plane_ctl, surf_addr;
- enum plane_id plane_id;
- unsigned long irqflags;
- enum pipe pipe;
-
- if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
- continue;
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->enable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->enable_flip_done(plane);
+ }
+}
- plane_id = plane->id;
- pipe = plane->pipe;
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
- surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->disable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->disable_flip_done(plane);
+ }
+}
- plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ bool need_vbl_wait = false;
+ int i;
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (plane->need_async_flip_disable_wa &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id)) {
+ /*
+ * Apart from the async flip bit we want to
+ * preserve the old state for the plane.
+ */
+ plane->async_flip(plane, old_crtc_state,
+ old_plane_state, false);
+ need_vbl_wait = true;
+ }
}
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (need_vbl_wait)
+ intel_wait_for_vblank(i915, crtc->pipe);
}
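All three new helpers above share one filter: a plane participates only when it sits on this CRTC's pipe and its bit is set in the crtc state's update_planes mask. A small sketch of that mask test with made-up plane ids:

#include <stdio.h>

struct plane { int pipe; int id; };

int main(void)
{
	const struct plane planes[] = { { 0, 0 }, { 0, 1 }, { 1, 0 } };
	unsigned int update_planes = 1u << 1; /* only plane id 1 is updating */
	int crtc_pipe = 0;

	for (unsigned int i = 0; i < sizeof(planes) / sizeof(planes[0]); i++) {
		if (planes[i].pipe == crtc_pipe &&
		    (update_planes & (1u << planes[i].id)))
			printf("pipe %d plane %d: run the hook\n",
			       planes[i].pipe, planes[i].id);
	}
	return 0;
}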
static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -6680,7 +4967,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* If we're doing a modeset we don't need to do any
* pre-vblank watermark programming here.
*/
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/*
* For platforms that support atomic watermarks, program the
* 'intermediate' watermarks immediately. On pre-gen9 platforms, these
@@ -6716,10 +5003,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip &&
- !new_crtc_state->uapi.async_flip &&
- IS_GEN_RANGE(dev_priv, 9, 10))
- skl_disable_async_flip_wa(state, crtc, new_crtc_state);
+ if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ intel_crtc_async_flip_disable_wa(state, crtc);
}
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7139,7 +5424,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -7574,25 +5859,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = crtc->enabled_power_domains;
- crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc_state);
+ domains = get_crtc_power_domains(crtc_state);
- domains = new_domains & ~old_domains;
+ new_domains = domains & ~crtc->enabled_power_domains.mask;
+ old_domains = crtc->enabled_power_domains.mask & ~domains;
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
+ for_each_power_domain(domain, new_domains)
+ intel_display_power_get_in_set(dev_priv,
+ &crtc->enabled_power_domains,
+ domain);
- return old_domains & ~new_domains;
+ return old_domains;
}
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
- u64 domains)
+static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ u64 domains)
{
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
+ intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ &crtc->enabled_power_domains,
+ domains);
}
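The rewritten modeset_get_crtc_power_domains() reduces to two mask operations: acquire references for wanted & ~held, and return held & ~wanted so the caller can drop those after the modeset. Worked with example masks (domain bit values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long held   = 0x6; /* domains currently referenced */
	unsigned long long wanted = 0xc; /* domains the new state needs */

	unsigned long long to_get = wanted & ~held; /* acquire now:   0x8 */
	unsigned long long to_put = held & ~wanted; /* release later: 0x2 */

	printf("get 0x%llx, put 0x%llx\n", to_get, to_put);
	return 0;
}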
static void valleyview_crtc_enable(struct intel_atomic_state *state,
@@ -7788,12 +6073,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- enum intel_display_power_domain domain;
struct intel_plane *plane;
struct drm_atomic_state *state;
struct intel_crtc_state *temp_crtc_state;
enum pipe pipe = crtc->pipe;
- u64 domains;
int ret;
if (!crtc_state->hw.active)
@@ -7849,10 +6132,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
intel_update_watermarks(crtc);
intel_disable_shared_dpll(crtc_state);
- domains = crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
- crtc->enabled_power_domains = 0;
+ intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
@@ -7932,143 +6212,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
- return crtc_state->fdi_lanes;
-
- return 0;
-}
-
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_crtc *other_crtc;
- struct intel_crtc_state *other_crtc_state;
-
- drm_dbg_kms(&dev_priv->drm,
- "checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
- return -EINVAL;
- } else {
- return 0;
- }
- }
-
- if (INTEL_NUM_PIPES(dev_priv) == 2)
- return 0;
-
- /* Ivybridge 3 pipe is really complicated */
- switch (pipe) {
- case PIPE_A:
- return 0;
- case PIPE_B:
- if (pipe_config->fdi_lanes <= 2)
- return 0;
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
- return 0;
- case PIPE_C:
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "fdi link B uses too many lanes to enable link C\n");
- return -EINVAL;
- }
- return 0;
- default:
- BUG();
- }
-}
-
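ilk_check_fdi_lanes() encodes the Ivybridge lane-sharing rules: pipe B may use more than two lanes only while pipe C is idle, and pipe C is capped at two lanes and additionally needs B to use at most two. A condensed restatement under those rules (0 = ok, -1 = invalid; pipe numbering 0=A, 1=B, 2=C is illustrative):

#include <stdio.h>

static int check_fdi_lanes(int pipe, int lanes, int other_pipe_lanes)
{
	if (lanes > 4)
		return -1;
	switch (pipe) {
	case 0: /* A has its own lanes */
		return 0;
	case 1: /* B may use >2 lanes only if C is idle */
		return (lanes <= 2 || other_pipe_lanes == 0) ? 0 : -1;
	case 2: /* C is capped at 2 and needs B to leave lanes free */
		return (lanes <= 2 && other_pipe_lanes <= 2) ? 0 : -1;
	}
	return -1;
}

int main(void)
{
	printf("B=4, C idle -> %d\n", check_fdi_lanes(1, 4, 0)); /* ok */
	printf("B=4, C=2    -> %d\n", check_fdi_lanes(1, 4, 2)); /* invalid */
	printf("C=2, B=4    -> %d\n", check_fdi_lanes(2, 2, 4)); /* invalid */
	return 0;
}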
-#define RETRY 1
-static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int lane, link_bw, fdi_dotclock, ret;
- bool needs_recompute = false;
-
-retry:
- /* FDI is a binary signal running at ~2.7GHz, encoding
- * each output octet as 10 bits. The actual frequency
- * is stored as a divider into a 100MHz clock, and the
- * mode pixel clock is stored in units of 1KHz.
- * Hence the bw of each lane in terms of the mode signal
- * is:
- */
- link_bw = intel_fdi_link_freq(i915, pipe_config);
-
- fdi_dotclock = adjusted_mode->crtc_clock;
-
- lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
-
- pipe_config->fdi_lanes = lane;
-
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
-
- ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
- if (ret == -EDEADLK)
- return ret;
-
- if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
- pipe_config->pipe_bpp -= 2*3;
- drm_dbg_kms(&i915->drm,
- "fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
- needs_recompute = true;
- pipe_config->bw_constrained = true;
-
- goto retry;
- }
-
- if (needs_recompute)
- return RETRY;
-
- return ret;
-}
-
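The compute path above sizes the link as lanes = DIV_ROUND_UP(bpp * dotclock with SSC headroom, link_bw * 8) and, when the lane check fails, retries with pipe_bpp reduced by 2 bits per component down to a floor of 6*3. A worked example; the clock figures are illustrative, not taken from real hardware tables:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int lanes_required(int dotclock, int link_bw, int bpp)
{
	/* ~5% headroom for spread spectrum, as in ilk_get_lanes_required() */
	unsigned int bps = (unsigned int)dotclock * bpp * 21 / 20;

	return DIV_ROUND_UP(bps, (unsigned int)link_bw * 8);
}

int main(void)
{
	int dotclock = 148500; /* kHz; e.g. a 1080p60 mode */
	int link_bw = 270000;  /* kHz of mode clock one FDI lane can carry */
	int bpp = 24;

	while (lanes_required(dotclock, link_bw, bpp) > 4 && bpp > 6 * 3)
		bpp -= 2 * 3; /* the retry path drops 2 bits per component */

	printf("%d lanes at %d bpp\n",
	       lanes_required(dotclock, link_bw, bpp), bpp);
	return 0;
}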
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8300,19 +6443,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->hw.ctm) {
- /*
- * There is only one pipe CSC unit per pipe, and we need that
- * for output conversion from RGB->YCBCR. So if CTM is already
- * applied we can't support YCBCR420 output.
- */
- drm_dbg_kms(&dev_priv->drm,
- "YCBCR420 and CTM together are not possible\n");
- return -EINVAL;
- }
-
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8424,51 +6554,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
- return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
-}
-
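The two FP packings above differ only in how N is encoded: Pineview stores it one-hot ((1 << n) << 16) while the generic i9xx layout stores the raw n/m1/m2 fields. Side by side, with made-up divider values:

#include <stdio.h>

struct dpll { unsigned int n, m1, m2; };

static unsigned int pnv_fp(const struct dpll *d)
{
	return (1u << d->n) << 16 | d->m2;
}

static unsigned int i9xx_fp(const struct dpll *d)
{
	return d->n << 16 | d->m1 << 8 | d->m2;
}

int main(void)
{
	struct dpll d = { .n = 3, .m1 = 12, .m2 = 9 };

	printf("pnv  FP0 = 0x%08x\n", pnv_fp(&d));  /* N is one-hot shifted */
	printf("i9xx FP0 = 0x%08x\n", i9xx_fp(&d)); /* plain packed fields  */
	return 0;
}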
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
pipe)
{
@@ -8593,39 +6678,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
@@ -8885,128 +6937,7 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_disable_pll(dev_priv, pipe);
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
-
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- dpll |= (crtc_state->pixel_multiplier - 1)
- << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock->p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_GEN(dev_priv) >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
- if (crtc_state->sdvo_tv_clock)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
-}
-
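The "compute bitmask from p1 value" step above is a one-hot encoding: post divider value p1 sets bit p1 - 1 of the field before it is shifted into place. The mapping, spelled out:

#include <stdio.h>

int main(void)
{
	for (int p1 = 1; p1 <= 8; p1++)
		printf("p1=%d -> field 0x%02x\n", p1, 1 << (p1 - 1));
	return 0;
}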
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
-
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock->p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock->p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
-
- /*
- * Bspec:
- * "[Almador Errata}: For the correct operation of the muxed DVO pins
- * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
- * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
- * Enable) must be set to “1” in both the DPLL A Control Register
- * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
- *
- * For simplicity we keep both bits always enabled in
- * both DPLLs. The spec says we should disable the DVO 2X clock
- * when not needed, but this seems to work fine in practice.
- */
- if (IS_I830(dev_priv) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
- dpll |= DPLL_DVO_2X_MODE;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
@@ -9206,214 +7137,12 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 48000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i8xx_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
- limit = &intel_limits_i8xx_dvo;
- } else {
- limit = &intel_limits_i8xx_dac;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i8xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- if (intel_is_dual_link_lvds(dev_priv))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else {
- /* All other output types fall back to the i9xx SDVO limits */
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &pnv_limits_lvds;
- } else {
- limit = &pnv_limits_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i9xx_lvds;
- } else {
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- chv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- vlv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
@@ -10315,7 +8044,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(pipe), val);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -10423,172 +8152,6 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
-
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
- fp |= FP_CB_TUNE;
-
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
-
- dpll = 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- dpll |= (crtc_state->pixel_multiplier - 1)
- << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /*
- * The high speed IO clock is only really required for
- * SDVO/HDMI/DP, but we also enable it for CRT to make it
- * possible to share the DPLL between CRT and HDMI. Enabling
- * the clock needlessly does no real harm, except use up a
- * bit of power potentially.
- *
- * We'll limit this to IVB with 3 pipes, since it has only two
- * DPLLs and so DPLL sharing is the only way to get three pipes
- * driving PCH ports at the same time. On SNB we could do this,
- * and potentially avoid enabling the second DPLL, but it's not
- * clear if it's a win or a loss power-wise. No point in doing
- * this on ILK at all since it has a fixed DPLL<->pipe mapping.
- */
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
- switch (crtc_state->dpll.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
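ilk_needs_fb_cb_tune() above sets FP_CB_TUNE when the effective feedback divider M falls below factor * N. A sketch using the i9xx m = 5 * (m1 + 2) + (m2 + 2) convention; treat the formula and divider values as illustrative:

#include <stdbool.h>
#include <stdio.h>

struct dpll { int n, m1, m2; };

static int compute_m(const struct dpll *d)
{
	return 5 * (d->m1 + 2) + (d->m2 + 2);
}

static bool needs_cb_tune(const struct dpll *d, int factor)
{
	return compute_m(d) < factor * d->n;
}

int main(void)
{
	struct dpll d = { .n = 4, .m1 = 12, .m2 = 9 };

	/* m = 81, factor * n = 84, so the tune bit is set */
	printf("m = %d, needs tune (factor 21): %s\n",
	       compute_m(&d), needs_cb_tune(&d, 21) ? "yes" : "no");
	return 0;
}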
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_limit *limit;
- int refclk = 120000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!crtc_state->has_pch_encoder)
- return 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
- }
-
- if (intel_is_dual_link_lvds(dev_priv)) {
- if (refclk == 100000)
- limit = &ilk_limits_dual_lvds_100m;
- else
- limit = &ilk_limits_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &ilk_limits_single_lvds_100m;
- else
- limit = &ilk_limits_single_lvds;
- }
- } else {
- limit = &ilk_limits_dac;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- ilk_compute_dpll(crtc, crtc_state, NULL);
-
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
-
- return 0;
-}
-
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
@@ -11001,29 +8564,6 @@ out:
return ret;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- INTEL_GEN(dev_priv) >= 11) {
- struct intel_encoder *encoder =
- intel_get_crtc_new_encoder(state, crtc_state);
-
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
@@ -11225,16 +8765,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
- intel_wakeref_t wf;
u32 tmp;
if (INTEL_GEN(dev_priv) >= 11)
@@ -11305,16 +8842,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
@@ -11322,14 +8853,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder;
- intel_wakeref_t wf;
enum port port;
u32 tmp;
@@ -11339,16 +8867,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
/*
* The PLL needs to be enabled with a valid divider
* configuration, otherwise accessing DSI registers will hang
@@ -11431,30 +8953,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
- enum intel_display_power_domain power_domain;
- u64 power_domain_mask;
+ struct intel_display_power_domain_set power_domain_set = { };
bool active;
u32 tmp;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
-
- power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- wakerefs[power_domain] = wf;
- power_domain_mask = BIT_ULL(power_domain);
-
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs);
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -11477,6 +8989,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
}
+ if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ intel_vrr_get_config(crtc, pipe_config);
+
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
@@ -11518,14 +9033,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (wf) {
- wakerefs[power_domain] = wf;
- power_domain_mask |= BIT_ULL(power_domain);
-
+ if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
if (INTEL_GEN(dev_priv) >= 9)
skl_get_pfit_config(pipe_config);
else
@@ -11559,9 +9068,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
out:
- for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv,
- power_domain, wakerefs[power_domain]);
+ intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
return active;
}
@@ -11581,569 +9088,6 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- u32 base;
-
- if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->color_plane[0].offset;
-}
-
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
-{
- int x = plane_state->uapi.dst.x1;
- int y = plane_state->uapi.dst.y1;
- u32 pos = 0;
-
- if (x < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
-
- if (y < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
- pos |= y << CURSOR_Y_SHIFT;
-
- return pos;
-}
-
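intel_cursor_position() builds a sign-magnitude value per axis: a sign bit plus the absolute coordinate, X in the low half and Y in the high half of the register. A sketch of that packing; the sign bit and shift values here are illustrative:

#include <stdio.h>

#define CURSOR_POS_SIGN 0x8000u /* illustrative */
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16

static unsigned int cursor_pos(int x, int y)
{
	unsigned int pos = 0;

	if (x < 0) { pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; }
	pos |= (unsigned int)x << CURSOR_X_SHIFT;

	if (y < 0) { pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; }
	pos |= (unsigned int)y << CURSOR_Y_SHIFT;

	return pos;
}

int main(void)
{
	/* a cursor partially off the left edge: x negative, y positive */
	printf("(-10, 20) -> 0x%08x\n", cursor_pos(-10, 20));
	return 0;
}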
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- const struct drm_mode_config *config =
- &plane_state->uapi.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- return width > 0 && width <= config->cursor_width &&
- height > 0 && height <= config->cursor_height;
-}
-
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- unsigned int rotation = plane_state->hw.rotation;
- int src_x, src_y;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
-
- if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
- return -EINVAL;
- }
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- offset += (src_h * src_w - 1) * fb->format->cpp[0];
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_rect src = plane_state->uapi.src;
- const struct drm_rect dst = plane_state->uapi.dst;
- int ret;
-
- if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
- return -EINVAL;
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true);
- if (ret)
- return ret;
-
- /* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->uapi.src = src;
- plane_state->uapi.dst = dst;
-
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return 2048;
-}
-
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- u32 cntl = 0;
-
- if (crtc_state->gamma_enable)
- cntl |= CURSOR_GAMMA_ENABLE;
-
- return cntl;
-}
-
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- return CURSOR_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
-
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- int width = drm_rect_width(&plane_state->uapi.dst);
-
- /*
- * 845g/865g are only limited by the width of their cursors;
- * the height is arbitrary up to the precision of the register.
- */
- return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- switch (fb->pitches[0]) {
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
- fb->pitches[0]);
- return -EINVAL;
- }
-
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i845_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 cntl = 0, base = 0, pos = 0, size = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned int width = drm_rect_width(&plane_state->uapi.dst);
- unsigned int height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i845_cursor_ctl_crtc(crtc_state);
-
- size = (height << 12) | width;
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* On these chipsets we can only modify the base/size/stride
- * whilst the cursor is disabled.
- */
- if (plane->cursor.base != base ||
- plane->cursor.size != size ||
- plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE, size);
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
-
- plane->cursor.base = base;
- plane->cursor.size = size;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i845_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i845_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-
- *pipe = PIPE_A;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return plane->base.dev->mode_config.cursor_width * 4;
-}
-
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 cntl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return cntl;
-
- if (crtc_state->gamma_enable)
- cntl = MCURSOR_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
- return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- u32 cntl = 0;
-
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
-
- switch (drm_rect_width(&plane_state->uapi.dst)) {
- case 64:
- cntl |= MCURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= MCURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= MCURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
- return 0;
- }
-
- if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
- cntl |= MCURSOR_ROTATE_180;
-
- return cntl;
-}
-
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- if (!intel_cursor_size_ok(plane_state))
- return false;
-
- /* Cursor width is limited to a few power-of-two sizes */
- switch (width) {
- case 256:
- case 128:
- case 64:
- break;
- default:
- return false;
- }
-
- /*
- * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
- * height from 8 lines up to the cursor width, when the
- * cursor is not rotated. Everything else requires square
- * cursors.
- */
- if (HAS_CUR_FBC(dev_priv) &&
- plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
- if (height < 8 || height > width)
- return false;
- } else {
- if (height != width)
- return false;
- }
-
- return true;
-}
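The checks above distill into a small predicate. A minimal sketch, assuming the
intel_cursor_size_ok() prerequisite has already passed and using has_cur_fbc and
rotated as stand-ins for the HAS_CUR_FBC() and DRM_MODE_ROTATE_0 tests:

/* Hedged sketch of the i9xx cursor size rules; not the driver's own helper. */
static bool i9xx_cursor_size_ok_sketch(int width, int height,
				       bool has_cur_fbc, bool rotated)
{
	/* cursor width is limited to a few power-of-two sizes */
	if (width != 64 && width != 128 && width != 256)
		return false;

	/* IVB+ CUR_FBC_CTL: height may range from 8 lines up to the width */
	if (has_cur_fbc && !rotated)
		return height >= 8 && height <= width;

	/* everything else requires a square cursor */
	return height == width;
}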
-
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor, ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- if (fb->pitches[0] !=
- drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- /*
- * There's something wrong with the cursor on CHV pipe C.
- * If it straddles the left edge of the screen then
- * moving it away from the edge or disabling it often
- * results in a pipe underrun, and often that can lead to
- * dead pipe (constant underrun reported, and it scans
- * out just a solid color). To recover from that, the
- * display power well must be turned off and on again.
- * Refuse to put the cursor into that compromised position.
- */
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
- return -EINVAL;
- }
-
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned width = drm_rect_width(&plane_state->uapi.dst);
- unsigned height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i9xx_cursor_ctl_crtc(crtc_state);
-
- if (width != height)
- fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * On some platforms writing CURCNTR first will also
- * cause CURPOS to be armed by the CURBASE write.
- * Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always update CURCNTR before
- * CURPOS.
- *
- * On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additionally
- * a write to any of the cursor register will cancel
- * an already armed cursor update. Thus leaving out
- * the CURBASE write after CURPOS could lead to a
- * cursor that doesn't appear to move, or even change
- * shape. Thus we always write CURBASE.
- *
- * The other registers are armed by the CURBASE write
- * except when the plane is getting enabled at which time
- * the CURCNTR write arms the update.
- */
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_write_cursor_wm(plane, crtc_state);
-
- if (!needs_modeset(crtc_state))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
-
- if (plane->cursor.base != base ||
- plane->cursor.size != fbc_ctl ||
- plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
- fbc_ctl);
- intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-
- plane->cursor.base = base;
- plane->cursor.size = fbc_ctl;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
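The arming rules spelled out in the comment above reduce to one fixed write
order. A minimal sketch using the same intel_de_write_fw() helper; the
CUR_FBC_CTL and watermark writes are left out and only the ordering is shown:

/* Sketch: required register write order for an i9xx cursor update. */
static void i9xx_cursor_write_order_sketch(struct drm_i915_private *i915,
					   enum pipe pipe,
					   u32 cntl, u32 pos, u32 base)
{
	/* 1: CURCNTR first, so the CURPOS write below cannot arm on its own */
	intel_de_write_fw(i915, CURCNTR(pipe), cntl);
	/* 2: CURPOS is latched but not yet armed */
	intel_de_write_fw(i915, CURPOS(pipe), pos);
	/* 3: CURBASE always last - it arms the update (CURCNTR arms it
	 * instead only when the plane is being enabled)
	 */
	intel_de_write_fw(i915, CURBASE(pipe), base);
}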
-
-static void i9xx_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i9xx_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-3 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
-
- ret = val & MCURSOR_MODE;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- *pipe = plane->pipe;
- else
- *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
- MCURSOR_PIPE_SELECT_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -12526,33 +9470,6 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
- struct intel_crtc *crtc)
-{
- memset(crtc_state, 0, sizeof(*crtc_state));
-
- __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
-
- crtc_state->cpu_transcoder = INVALID_TRANSCODER;
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
- crtc_state->scaler_state.scaler_id = -1;
- crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
-}
-
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state;
-
- crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
-
- if (crtc_state)
- intel_crtc_state_reset(crtc_state, crtc);
-
- return crtc_state;
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -12593,14 +9510,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
-}
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @cur: current plane state
@@ -12650,7 +9559,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
@@ -12841,6 +9750,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->planar_linked_plane = NULL;
if (plane_state->planar_slave && !plane_state->uapi.visible) {
+ crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -12884,6 +9794,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_slave = true;
linked_state->planar_linked_plane = plane;
+ crtc_state->enabled_planes |= BIT(linked->id);
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
@@ -13012,7 +9923,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
@@ -13427,6 +10338,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ yesno(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
@@ -13814,7 +10732,7 @@ encoder_retry:
return ret;
}
- if (ret == RETRY) {
+ if (ret == I915_DISPLAY_CONFIG_RETRY) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -14420,6 +11338,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(mst_master_transcoder);
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
+ PIPE_CONF_CHECK_I(vrr.vmin);
+ PIPE_CONF_CHECK_I(vrr.vmax);
+ PIPE_CONF_CHECK_I(vrr.flipline);
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -14844,7 +11768,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
verify_wm_state(crtc, new_crtc_state);
@@ -14878,10 +11802,17 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode =
+ crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
- drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
@@ -14915,8 +11846,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
if (IS_GEN(dev_priv, 2)) {
int vtotal;
- vtotal = adjusted_mode->crtc_vtotal;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal = adjusted_mode.crtc_vtotal;
+ if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
@@ -14939,7 +11870,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
intel_release_shared_dplls(state, crtc);
@@ -14964,7 +11895,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled during the modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.active ||
- !needs_modeset(crtc_state))
+ !intel_crtc_needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -14989,7 +11920,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->hw.active ||
- needs_modeset(crtc_state))
+ intel_crtc_needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -15101,6 +12032,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
+
static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
/* See {hsw,vlv,ivb}_plane_ratio() */
@@ -15295,7 +12239,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->hw.enable &&
transcoders & BIT(new_crtc_state->cpu_transcoder) &&
- needs_modeset(new_crtc_state))
+ intel_crtc_needs_modeset(new_crtc_state))
return true;
}
@@ -15316,7 +12260,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
slave = crtc;
master = old_crtc_state->bigjoiner_linked_crtc;
master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
- if (!master_crtc_state || !needs_modeset(master_crtc_state))
+ if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
@@ -15354,21 +12298,16 @@ claimed:
return -EINVAL;
}
-static int kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc_state *master_crtc_state)
{
struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_crtc_state(&state->base,
- master_crtc_state->bigjoiner_linked_crtc);
-
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
- return 0;
}
/**
@@ -15381,7 +12320,7 @@ static int kill_bigjoiner_slave(struct intel_atomic_state *state,
* Async flip can only change the plane surface address, so anything else
* changing is rejected from the intel_atomic_check_async() function.
* Once this check is cleared, flip done interrupt is enabled using
- * the skl_enable_flip_done() function.
+ * the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
* generated and the requested events are sent to userspace in the interrupt
@@ -15400,7 +12339,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
return -EINVAL;
}
@@ -15425,7 +12364,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
* this (vlv/chv and icl+) should be added when async flip is
* enabled in the atomic IOCTL path.
*/
- if (plane->id != PLANE_PRIMARY)
+ if (!plane->async_flip)
return -EINVAL;
/*
@@ -15506,20 +12445,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- const struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc_state *linked_crtc_state;
+ struct intel_crtc *linked_crtc;
+ int ret;
if (!crtc_state->bigjoiner)
continue;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
- crtc_state->bigjoiner_linked_crtc);
+ linked_crtc = crtc_state->bigjoiner_linked_crtc;
+ linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
if (IS_ERR(linked_crtc_state))
return PTR_ERR(linked_crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ linked_crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &linked_crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ /* Kill the old bigjoiner link; we may re-establish it afterwards */
+ if (intel_crtc_needs_modeset(crtc_state) &&
+ crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
+ kill_bigjoiner_slave(state, crtc_state);
}
return 0;
@@ -15546,6 +12508,8 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->uapi.mode_changed = true;
}
+ intel_vrr_check_modeset(state);
+
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
@@ -15556,20 +12520,13 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/* Light copy */
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
continue;
}
- /* Kill old bigjoiner link, we may re-establish afterwards */
- if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
- ret = kill_bigjoiner_slave(state, new_crtc_state);
- if (ret)
- goto fail;
- }
-
if (!new_crtc_state->uapi.enable) {
if (!new_crtc_state->bigjoiner_slave) {
intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
@@ -15594,7 +12551,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
ret = intel_modeset_pipe_config_late(new_crtc_state);
@@ -15616,7 +12573,7 @@ static int intel_atomic_check(struct drm_device *dev,
* forced a full modeset.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
@@ -15639,11 +12596,21 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->update_pipe = false;
}
}
+
+ if (new_crtc_state->bigjoiner) {
+ struct intel_crtc_state *linked_crtc_state =
+ intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
+
+ if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
any_ms = true;
continue;
}
@@ -15669,20 +12636,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /*
- * distrust_bios_wm will force a full dbuf recomputation
- * but the hardware state will only get updated accordingly
- * if state->modeset==true. Hence distrust_bios_wm==true &&
- * state->modeset==false is an invalid combination which
- * would cause the hardware and software dbuf state to get
- * out of sync. We must prevent that.
- *
- * FIXME clean up this mess and introduce better
- * state tracking for dbuf.
- */
- if (dev_priv->wm.distrust_bios_wm)
- any_ms = true;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -15720,12 +12673,12 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- if (!needs_modeset(new_crtc_state) &&
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(new_crtc_state) ?
+ intel_crtc_needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -15757,7 +12710,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
if (mode_changed || crtc_state->update_pipe ||
crtc_state->uapi.color_mgmt_changed) {
@@ -15768,17 +12721,6 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return 0;
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
- return crtc->base.funcs->get_vblank_counter(&crtc->base);
-}
-
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -15848,7 +12790,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/*
* During modesets pipe configuration was programmed as the
@@ -15882,7 +12824,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
return;
intel_crtc_update_active_timings(new_crtc_state);
@@ -15904,7 +12846,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
if (!modeset) {
if (new_crtc_state->preload_luts &&
@@ -15996,7 +12938,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
continue;
if (!old_crtc_state->hw.active)
@@ -16020,7 +12962,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Disable everything else left on */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) ||
+ if (!intel_crtc_needs_modeset(new_crtc_state) ||
(handled & BIT(crtc->pipe)) ||
old_crtc_state->bigjoiner_slave)
continue;
@@ -16070,7 +13012,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
/* ignore allocations for crtc's that have been turned off. */
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
entries[pipe] = old_crtc_state->wm.skl.ddb;
update_pipes |= BIT(pipe);
} else {
@@ -16246,6 +13188,43 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ if (!fb ||
+ fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ continue;
+
+ /*
+ * The layout of the fast clear color value expected by HW
+ * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
+ * - 4 x 4 bytes per-channel value
+ * (in surface type specific float/int format provided by the fb user)
+ * - 8 bytes native color value used by the display
+ * (converted/written by GPU during a fast clear operation using the
+ * above per-channel values)
+ *
+ * The commit's FB prepare hook already ensured that the FB obj is pinned
+ * and the caller made sure that the object is synced against the related
+ * clear color value GPU write on it.
+ */
+ ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+ fb->offsets[2] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
+ /* The above could only fail if the FB obj has an unexpected backing store type. */
+ drm_WARN_ON(&i915->drm, ret);
+ }
+}
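The 24-byte block described in the comment can be pictured as a struct. The field
names here are invented for illustration, but the offsets match what the function
reads: i915_gem_object_read_from_page() pulls the 8-byte native value from
fb->offsets[2] + 16:

/* Illustrative layout of the gen12 CCS clear color block (plane #2, offset 0). */
struct gen12_cc_block_sketch {
	u32 channel[4];	/* 16 bytes: per-channel values in the fb user's format */
	u64 native;	/* 8 bytes at offset 16: display-native value read above */
};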
+
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -16263,9 +13242,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ intel_atomic_prepare_plane_clear_colors(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state) ||
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
@@ -16291,7 +13272,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Complete events for the now disabled pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
@@ -16311,7 +13292,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_enable_flip_done(crtc);
+ intel_crtc_enable_flip_done(state, crtc);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -16336,10 +13317,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_disable_flip_done(crtc);
+ intel_crtc_disable_flip_done(state, crtc);
if (new_crtc_state->hw.active &&
- !needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -16375,8 +13356,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
+ modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -16540,7 +13520,6 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -16619,7 +13598,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -16649,7 +13628,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return 0;
}
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
struct i915_vma *vma;
@@ -16658,15 +13637,6 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_unpin_fb_vma(vma, old_plane_state->flags);
}
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
-{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
- };
-
- i915_gem_object_wait_priority(obj, 0, &attr);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -16683,6 +13653,9 @@ int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
+ };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
@@ -16711,7 +13684,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state)) {
+ if (intel_crtc_needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
@@ -16722,6 +13695,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
i915_fence_timeout(dev_priv),
@@ -16743,7 +13718,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret)
return ret;
- fb_obj_bump_render_priority(obj);
+ i915_gem_object_wait_priority(obj, 0, &attr);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
if (!new_plane_state->uapi.fence) { /* implicit fencing */
@@ -16832,540 +13807,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB8888:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- return modifier == DRM_FORMAT_MOD_LINEAR &&
- format == DRM_FORMAT_ARGB8888;
-}
-
-static const struct drm_plane_funcs i965_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i965_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs i8xx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i8xx_plane_format_mod_supported,
-};
-
-static int
-intel_legacy_cursor_update(struct drm_plane *_plane,
- struct drm_crtc *_crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- u32 src_x, u32 src_y,
- u32 src_w, u32 src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_plane_state *new_plane_state;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_crtc_state *new_crtc_state;
- int ret;
-
- /*
- * When crtc is inactive or there is a modeset pending,
- * wait for it to complete in the slowpath
- *
- * FIXME bigjoiner fastpath would be good
- */
- if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner)
- goto slow;
-
- /*
- * Don't do an async update if there is an outstanding commit modifying
- * the plane. This prevents our async update's changes from getting
- * overridden by a previous synchronous update's state.
- */
- if (old_plane_state->uapi.commit &&
- !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
- goto slow;
-
- /*
- * If any parameters change that may affect watermarks,
- * take the slowpath. Only changing fb or position should be
- * in the fastpath.
- */
- if (old_plane_state->uapi.crtc != &crtc->base ||
- old_plane_state->uapi.src_w != src_w ||
- old_plane_state->uapi.src_h != src_h ||
- old_plane_state->uapi.crtc_w != crtc_w ||
- old_plane_state->uapi.crtc_h != crtc_h ||
- !old_plane_state->uapi.fb != !fb)
- goto slow;
-
- new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
- if (!new_plane_state)
- return -ENOMEM;
-
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
- if (!new_crtc_state) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
-
- new_plane_state->uapi.src_x = src_x;
- new_plane_state->uapi.src_y = src_y;
- new_plane_state->uapi.src_w = src_w;
- new_plane_state->uapi.src_h = src_h;
- new_plane_state->uapi.crtc_x = crtc_x;
- new_plane_state->uapi.crtc_y = crtc_y;
- new_plane_state->uapi.crtc_w = crtc_w;
- new_plane_state->uapi.crtc_h = crtc_h;
-
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
-
- ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- old_plane_state, new_plane_state);
- if (ret)
- goto out_free;
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- goto out_free;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
- to_intel_frontbuffer(new_plane_state->hw.fb),
- plane->frontbuffer_bit);
-
- /* Swap plane state */
- plane->base.state = &new_plane_state->uapi;
-
- /*
- * We cannot swap crtc_state as it may be in use by an atomic commit or
- * page flip that's running simultaneously. If we swap crtc_state and
- * destroy the old state, we will cause a use-after-free there.
- *
- * Only update active_planes, which is needed for our internal
- * bookkeeping. Either value will do the right thing when updating
- * planes atomically. If the cursor was part of the atomic update then
- * we would have taken the slowpath.
- */
- crtc_state->active_planes = new_crtc_state->active_planes;
-
- if (new_plane_state->uapi.visible)
- intel_update_plane(plane, crtc_state, new_plane_state);
- else
- intel_disable_plane(plane, crtc_state);
-
- intel_plane_unpin_fb(old_plane_state);
-
-out_free:
- if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
- if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
- else
- intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
- return ret;
-
-slow:
- return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_legacy_cursor_update,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_format_mod_supported,
-};
-
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
- else if (INTEL_GEN(dev_priv) >= 4)
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
- else
- return i9xx_plane == PLANE_A;
-}
-
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *plane;
- const struct drm_plane_funcs *plane_funcs;
- unsigned int supported_rotations;
- const u32 *formats;
- int num_formats;
- int ret, zpos;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- /*
- * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
- * port are hooked to pipe B. Hence we want plane A feeding pipe B.
- */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
- plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
- else
- plane->i9xx_plane = (enum i9xx_plane_id) pipe;
- plane->id = PLANE_PRIMARY;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- formats = vlv_primary_formats;
- num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- /*
- * WaFP16GammaEnabling:ivb
- * "Workaround : When using the 64-bit format, the plane
- * output on each color channel has one quarter amplitude.
- * It can be brought up to full amplitude by using pipe
- * gamma correction or pipe color space conversion to
- * multiply the plane output by four."
- *
- * There is no dedicated plane gamma for the primary plane,
- * and using the pipe gamma/csc could conflict with other
- * planes, so we choose not to expose fp16 on IVB primary
- * planes. HSW primary planes no longer have this problem.
- */
- if (IS_IVYBRIDGE(dev_priv)) {
- formats = ivb_primary_formats;
- num_formats = ARRAY_SIZE(ivb_primary_formats);
- } else {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
- }
- } else {
- formats = i8xx_primary_formats;
- num_formats = ARRAY_SIZE(i8xx_primary_formats);
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- plane_funcs = &i965_plane_funcs;
- else
- plane_funcs = &i8xx_plane_funcs;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "primary %c", pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane %c",
- plane_name(plane->i9xx_plane));
- if (ret)
- goto fail;
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- } else {
- supported_rotations = DRM_MODE_ROTATE_0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- zpos = 0;
- drm_plane_create_zpos_immutable_property(&plane->base, zpos);
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_plane *cursor;
- int ret, zpos;
-
- cursor = intel_plane_alloc();
- if (IS_ERR(cursor))
- return cursor;
-
- cursor->pipe = pipe;
- cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
- cursor->id = PLANE_CURSOR;
- cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- cursor->max_stride = i845_cursor_max_stride;
- cursor->update_plane = i845_update_cursor;
- cursor->disable_plane = i845_disable_cursor;
- cursor->get_hw_state = i845_cursor_get_hw_state;
- cursor->check_plane = i845_check_cursor;
- } else {
- cursor->max_stride = i9xx_cursor_max_stride;
- cursor->update_plane = i9xx_update_cursor;
- cursor->disable_plane = i9xx_disable_cursor;
- cursor->get_hw_state = i9xx_cursor_get_hw_state;
- cursor->check_plane = i9xx_check_cursor;
- }
-
- cursor->cursor.base = ~0;
- cursor->cursor.cntl = ~0;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
- cursor->cursor.size = ~0;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- cursor_format_modifiers,
- DRM_PLANE_TYPE_CURSOR,
- "cursor %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&cursor->base,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180);
-
- zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
- drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&cursor->base);
-
- drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
-
- return cursor;
-
-fail:
- intel_plane_free(cursor);
-
- return ERR_PTR(ret);
-}
-
-#define INTEL_CRTC_FUNCS \
- .gamma_set = drm_atomic_helper_legacy_gamma_set, \
- .set_config = drm_atomic_helper_set_config, \
- .destroy = intel_crtc_destroy, \
- .page_flip = drm_atomic_helper_page_flip, \
- .atomic_duplicate_state = intel_crtc_duplicate_state, \
- .atomic_destroy_state = intel_crtc_destroy_state, \
- .set_crc_source = intel_crtc_set_crc_source, \
- .verify_crc_source = intel_crtc_verify_crc_source, \
- .get_crc_sources = intel_crtc_get_crc_sources
-
-static const struct drm_crtc_funcs bdw_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = bdw_enable_vblank,
- .disable_vblank = bdw_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs ilk_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = ilk_enable_vblank,
- .disable_vblank = ilk_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs g4x_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i965_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915gm_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915gm_enable_vblank,
- .disable_vblank = i915gm_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i8xx_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- /* no hw vblank counter */
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static struct intel_crtc *intel_crtc_alloc(void)
-{
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
- if (!crtc)
- return ERR_PTR(-ENOMEM);
-
- crtc_state = intel_crtc_state_alloc(crtc);
- if (!crtc_state) {
- kfree(crtc);
- return ERR_PTR(-ENOMEM);
- }
-
- crtc->base.state = &crtc_state->uapi;
- crtc->config = crtc_state;
-
- return crtc;
-}
-
-static void intel_crtc_free(struct intel_crtc *crtc)
-{
- intel_crtc_destroy_state(&crtc->base, crtc->base.state);
- kfree(crtc);
-}
-
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
@@ -17378,100 +13819,6 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *primary, *cursor;
- const struct drm_crtc_funcs *funcs;
- struct intel_crtc *crtc;
- int sprite, ret;
-
- crtc = intel_crtc_alloc();
- if (IS_ERR(crtc))
- return PTR_ERR(crtc);
-
- crtc->pipe = pipe;
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
-
- primary = intel_primary_plane_create(dev_priv, pipe);
- if (IS_ERR(primary)) {
- ret = PTR_ERR(primary);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(primary->id);
-
- for_each_sprite(dev_priv, pipe, sprite) {
- struct intel_plane *plane;
-
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(plane->id);
- }
-
- cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (IS_ERR(cursor)) {
- ret = PTR_ERR(cursor);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(cursor->id);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
- funcs = &g4x_crtc_funcs;
- else if (IS_GEN(dev_priv, 4))
- funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- funcs = &i915gm_crtc_funcs;
- else if (IS_GEN(dev_priv, 3))
- funcs = &i915_crtc_funcs;
- else
- funcs = &i8xx_crtc_funcs;
- } else {
- if (INTEL_GEN(dev_priv) >= 8)
- funcs = &bdw_crtc_funcs;
- else
- funcs = &ilk_crtc_funcs;
- }
-
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
- &primary->base, &cursor->base,
- funcs, "pipe %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
- dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
- if (INTEL_GEN(dev_priv) < 9) {
- enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
- }
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_crtc_create_scaling_filter_property(&crtc->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- intel_color_init(crtc);
-
- intel_crtc_crc_init(crtc);
-
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
-
- return 0;
-
-fail:
- intel_crtc_free(crtc);
-
- return ret;
-}
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -17554,48 +13901,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
- int pps_num;
- int pps_idx;
-
- if (HAS_DDI(dev_priv))
- return;
- /*
- * This w/a is needed at least on CPT/PPT, but to be sure apply it
- * everywhere where registers can be write protected.
- */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
-
- for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
- val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
- }
-}
-
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
- dev_priv->pps_mmio_base = PCH_PPS_BASE;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->pps_mmio_base = VLV_PPS_BASE;
- else
- dev_priv->pps_mmio_base = PPS_BASE;
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_init(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -17996,7 +14307,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- if (is_gen12_ccs_plane(fb, i)) {
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
@@ -18194,86 +14505,40 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_dpll_init_clock_hook(dev_priv);
+
if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- skl_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- ilk_crtc_compute_clock;
dev_priv->display.crtc_enable = ilk_crtc_enable;
dev_priv->display.crtc_disable = ilk_crtc_disable;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_G4X(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN(dev_priv, 2)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- }
+ intel_fdi_init_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- else
+ dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
+ } else {
dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
+ dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
+ }
}
@@ -18281,14 +14546,10 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
-
- dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -18521,8 +14782,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- if (INTEL_GEN(i915) >= 9)
- mode_config->async_page_flip = true;
+ mode_config->async_page_flip = has_async_flips(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -18607,6 +14867,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ i915->framestart_delay = 1; /* 1-4 */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
@@ -18653,6 +14915,8 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
+ intel_pps_setup(i915);
+
intel_gmbus_setup(i915);
drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
@@ -18941,7 +15205,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -18949,7 +15213,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -18962,7 +15226,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
@@ -18971,7 +15235,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
}
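
All four variants now program the field from i915->framestart_delay instead of a hardcoded 0. Since the delay is specified as 1-4 while the registers store it minus one, the default of 1 set in intel_modeset_init_noirq() below is behaviour-neutral:

	/* framestart_delay = 1 (the default) yields the old value:
	 *   HSW_FRAME_START_DELAY(1 - 1) == HSW_FRAME_START_DELAY(0)
	 * and likewise for the PIPECONF/TRANS variants. */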
@@ -19164,7 +15428,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
}
}
@@ -19587,7 +15851,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (drm_WARN_ON(dev, put_domains))
- modeset_put_power_domains(dev_priv, put_domains);
+ modeset_put_crtc_power_domains(crtc, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 5e0d42d82c11..64ffa34544a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -499,6 +499,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -544,7 +546,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -628,11 +629,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -643,7 +642,17 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier);
+ u64 modifier);
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ca41e8c00ad7..d62b18d5ecd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -18,6 +18,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
}
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+ u64 count;
+ int row;
+
+ count = 0;
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+ count += crtc->debug.vbl.times[row];
+ seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+ if (!count)
+ return;
+
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+ char columns[80] = " |";
+ unsigned int x;
+
+ if (row & 1) {
+ const char *units;
+
+ if (row > 10) {
+ x = 1000000;
+ units = "ms";
+ } else {
+ x = 1000;
+ units = "us";
+ }
+
+ snprintf(columns, sizeof(columns), "%4ld%s |",
+ DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+ }
+
+ if (crtc->debug.vbl.times[row]) {
+ x = ilog2(crtc->debug.vbl.times[row]);
+ memset(columns + 8, '*', x);
+ columns[8 + x] = '\0';
+ }
+
+ seq_printf(m, "%s%s\n", hdr, columns);
+ }
+
+ seq_printf(m, "%sMin update: %lluns\n",
+ hdr, crtc->debug.vbl.min);
+ seq_printf(m, "%sMax update: %lluns\n",
+ hdr, crtc->debug.vbl.max);
+ seq_printf(m, "%sAverage update: %lluns\n",
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
+ seq_printf(m, "%sOverruns > %uus: %u\n",
+ hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+ crtc_updates_info(m, m->private, "");
+ return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_crtc *crtc = m->private;
+
+ /* May race with an update. Meh. */
+ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+ return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_updates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+ debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+ to_intel_crtc(crtc), &crtc_updates_fops);
+}
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
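
The histogram above keeps log2-spaced buckets (times[17] covers roughly 1us to 16ms per the struct added to intel_display_types.h below), and any write to the file clears the stats. A hypothetical userspace sketch for dumping and resetting it, assuming DRM's standard per-CRTC debugfs layout (crtc-N under the card's debugfs directory):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/crtc-0/i915_update_info";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* Updates, histogram, min/max/avg, overruns */
	fclose(f);

	f = fopen(path, "w");
	if (f) {
		fputs("0\n", f);	/* any write zeroes the stats */
		fclose(f);
	}
	return 0;
}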
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
+
+ crtc_updates_info(m, crtc, "\t");
}
static int i915_display_info(struct seq_file *m, void *unused)
@@ -1032,7 +1139,6 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (!dev_priv->ipc_enabled && enable)
drm_info(&dev_priv->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
}
@@ -2048,13 +2154,13 @@ static int i915_panel_show(struct seq_file *m, void *data)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
+ intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
+ intel_dp->pps.panel_power_down_delay);
seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
+ intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
+ intel_dp->pps.backlight_off_delay);
return 0;
}
@@ -2278,3 +2384,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
return 0;
}
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Returns 0 on success, negative error codes on error.
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+ if (!crtc->debugfs_entry)
+ return -ENODEV;
+
+ crtc_updates_add(crtc);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index c922c1745bfe..557901f3eb90 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -7,14 +7,17 @@
#define __INTEL_DISPLAY_DEBUGFS_H__
struct drm_connector;
+struct drm_crtc;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
+int intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fe2d90bba536..c11c37c65d86 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4,7 +4,6 @@
*/
#include "display/intel_crt.h"
-#include "display/intel_dp.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -16,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -936,7 +936,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -1446,7 +1446,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
@@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
}
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
@@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to put the reference for
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use to suppress
+ * wakeref tracking when the corresponding debug kconfig option is disabled;
+ * it should not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t __maybe_unused wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get_if_enabled(i915, domain);
+ if (!wf)
+ return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+
+ return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+
+ for_each_power_domain(domain, mask) {
+ intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+ intel_display_power_put(i915, domain, wf);
+ power_domain_set->mask &= ~BIT_ULL(domain);
+ }
+}
+
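
These helpers let a caller hold a whole set of domain references behind one mask, with the per-domain wakerefs tracked only when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled (this is what intel_crtc.enabled_power_domains converts to below). A minimal caller sketch; the readout function itself is hypothetical:

static void example_readout(struct drm_i915_private *i915)
{
	struct intel_display_power_domain_set domains = {};

	/* unconditional reference */
	intel_display_power_get_in_set(i915, &domains,
				       POWER_DOMAIN_PIPE(PIPE_A));

	/* conditional reference, taken only if the well is already on */
	if (intel_display_power_get_in_set_if_enabled(i915, &domains,
						      POWER_DOMAIN_TRANSCODER(TRANSCODER_A))) {
		/* ... read transcoder registers ... */
	}

	/* ... read pipe registers ... */

	/* drops every reference acquired into the set */
	intel_display_power_put_all_in_set(i915, &domains);
}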
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
@@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ if (!i915->params.disable_power_well) {
+ drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+ i915->power_domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
+ }
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
@@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
@@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
{
struct i915_power_domains *power_domains = &i915->power_domains;
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
+ fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
@@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 4aa0a09cf14f..bc30c479be53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -212,7 +212,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
- intel_wakeref_t wakeref;
+ intel_wakeref_t init_wakeref;
+ intel_wakeref_t disable_wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
@@ -224,6 +225,13 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
+struct intel_display_power_domain_set {
+ u64 mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
for_each_if(BIT_ULL(domain) & (mask))
@@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
@@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915,
__intel_display_power_put_async(i915, domain, wakeref);
}
#else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+
static inline void
intel_display_power_put(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
@@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+}
+
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 34d78c654df3..39397748b4b0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -225,6 +225,17 @@ struct intel_encoder {
const struct drm_connector *audio_connector;
};
+struct intel_panel_bl_funcs {
+ /* Connector and platform specific backlight functions */
+ int (*setup)(struct intel_connector *connector, enum pipe pipe);
+ u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
+ void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+ void (*enable)(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
@@ -241,24 +252,28 @@ struct intel_panel {
bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
+ u32 pwm_level_min;
+ u32 pwm_level_max;
+ bool pwm_enabled;
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
struct pwm_state pwm_state;
/* DPCD backlight */
- u8 pwmgen_bit_count;
+ union {
+ struct {
+ u8 pwmgen_bit_count;
+ } vesa;
+ struct {
+ bool sdr_uses_aux;
+ } intel;
+ } edp;
struct backlight_device *device;
- /* Connector and platform specific backlight functions */
- int (*setup)(struct intel_connector *connector, enum pipe pipe);
- u32 (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, u32 level);
- void (*disable)(const struct drm_connector_state *conn_state);
- void (*enable)(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+ const struct intel_panel_bl_funcs *funcs;
+ const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@@ -339,6 +354,10 @@ struct intel_hdcp_shim {
enum transcoder cpu_transcoder,
bool enable);
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* Ensures the link is still protected */
bool (*check_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
@@ -370,8 +389,13 @@ struct intel_hdcp_shim {
int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
};
struct intel_hdcp {
@@ -398,7 +422,6 @@ struct intel_hdcp {
* content can flow only through a link protected by HDCP2.2.
*/
u8 content_type;
- struct hdcp_port_data port_data;
bool is_paired;
bool is_repeater;
@@ -432,6 +455,8 @@ struct intel_hdcp {
* Hence caching the transcoder here.
*/
enum transcoder cpu_transcoder;
+ /* Only used for DP MST stream encryption */
+ enum transcoder stream_transcoder;
};
struct intel_connector {
@@ -531,7 +556,7 @@ struct intel_plane_state {
struct drm_framebuffer *fb;
u16 alpha;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
unsigned int rotation;
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
@@ -604,6 +629,11 @@ struct intel_plane_state {
u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
+
+ struct drm_rect psr2_sel_fetch_area;
+
+ /* Clear Color Value */
+ u64 ccval;
};
struct intel_initial_plane_config {
@@ -663,6 +693,8 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
struct intel_wm_level {
bool enable;
@@ -1047,7 +1079,10 @@ struct intel_crtc_state {
u32 cgm_mode;
};
- /* bitmask of visible planes (enum plane_id) */
+ /* bitmask of logically enabled planes (enum plane_id) */
+ u8 enabled_planes;
+
+ /* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1118,6 +1153,13 @@ struct intel_crtc_state {
struct intel_dsb *dsb;
u32 psr2_man_track_ctl;
+
+ /* Variable Refresh Rate state */
+ struct {
+ bool enable;
+ u8 pipeline_full;
+ u16 flipline, vmin, vmax;
+ } vrr;
};
enum intel_pipe_crc_source {
@@ -1160,7 +1202,9 @@ struct intel_crtc {
/* I915_MODE_FLAG_* */
u8 mode_flags;
- unsigned long long enabled_power_domains;
+ u16 vmax_vblank_start;
+
+ struct intel_display_power_domain_set enabled_power_domains;
struct intel_overlay *overlay;
struct intel_crtc_state *config;
@@ -1186,6 +1230,15 @@ struct intel_crtc {
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ struct {
+ u64 min;
+ u64 max;
+ u64 sum;
+ unsigned int over;
+ unsigned int times[17]; /* [1us, 16ms] */
+ } vbl;
+#endif
} debug;
/* scalers available on this crtc */
@@ -1203,6 +1256,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
+ bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
struct {
@@ -1239,7 +1293,10 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+ const struct intel_plane_state *plane_state,
+ bool async_flip);
+ void (*enable_flip_done)(struct intel_plane *plane);
+ void (*disable_flip_done)(struct intel_plane *plane);
};
struct intel_watermark_params {
@@ -1321,6 +1378,43 @@ struct intel_dp_compliance {
u8 test_lane_count;
};
+struct intel_dp_pcon_frl {
+ bool is_trained;
+ int trained_rate_gbps;
+};
+
+struct intel_pps {
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
+ intel_wakeref_t vdd_wakeref;
+
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+ /*
+ * Pipe currently driving the port. Used for preventing
+	 * the use of the PPS for any pipe currently driving
+ * external DP as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
+ struct edp_power_seq pps_delays;
+};
+
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1331,6 +1425,7 @@ struct intel_dp {
bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
+ bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1339,6 +1434,7 @@ struct intel_dp {
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
u8 fec_capable;
+ u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1355,38 +1451,11 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
- u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- unsigned long last_power_on;
- unsigned long last_backlight_off;
- ktime_t panel_power_off_time;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
- /*
- * Pipe currently driving the port. Used for preventing
- * the use of the PPS for any pipe currentrly driving
- * external DP as that will mess things up on VLV.
- */
- enum pipe active_pipe;
- /*
- * Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
- */
- bool pps_reset;
- struct edp_power_seq pps_delays;
+ struct intel_pps pps;
bool can_mst; /* this port supports mst */
bool is_mst;
@@ -1432,8 +1501,10 @@ struct intel_dp {
struct {
int min_tmds_clock, max_tmds_clock;
int max_dotclock;
+ int pcon_max_frl_bw;
u8 max_bpc;
bool ycbcr_444_to_420;
+ bool rgb_to_ycbcr;
} dfp;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1444,6 +1515,8 @@ struct intel_dp {
bool hobl_failed;
bool hobl_active;
+
+ struct intel_dp_pcon_frl frl;
};
enum lspcon_vendor {
@@ -1453,6 +1526,7 @@ enum lspcon_vendor {
struct intel_lspcon {
bool active;
+ bool hdr_supported;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
};
@@ -1469,6 +1543,8 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ intel_wakeref_t ddi_io_wakeref;
+ intel_wakeref_t aux_wakeref;
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
int tc_link_refcount;
@@ -1478,10 +1554,14 @@ struct intel_digital_port {
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
- /* protects num_hdcp_streams reference count */
+ /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
struct mutex hdcp_mutex;
/* the number of pipes using HDCP signalling out of this port */
unsigned int num_hdcp_streams;
+ /* port HDCP auth status */
+ bool hdcp_auth_status;
+	/* HDCP port data that needs to be passed to the security f/w */
+ struct hdcp_port_data hdcp_port_data;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1758,6 +1838,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+ return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1780,4 +1866,32 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
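
A quick worked example of the FP register packing done by the inline above:

	/* n = 2, m1 = 3, m2 = 5:
	 *   2 << 16 | 3 << 8 | 5 == 0x020305
	 */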
+static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return dev_priv->fdi_pll_freq;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8a26307c4896..8c12d5375607 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -41,13 +41,13 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -58,10 +58,12 @@
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#define DP_DPRX_ESI_LEN 14
@@ -121,6 +123,11 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
@@ -145,12 +152,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
@@ -162,8 +163,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
int i, max_rate;
int max_lttpr_rate;
- if (drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
static const int quirk_rates[] = { 162000, 270000, 324000 };
@@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return -1;
}
+ if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with max parameters\n");
+ intel_dp->use_max_params = true;
+ return 0;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector,
!drm_mode_is_420_only(info, mode))
return INTEL_OUTPUT_FORMAT_RGB;
+ if (intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
if (intel_dp->dfp.ycbcr_444_to_420)
return INTEL_OUTPUT_FORMAT_YCBCR444;
else
@@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
const struct drm_display_info *info = &connector->base.display_info;
int tmds_clock;
+	/* If PCON supports FRL mode, check FRL bandwidth constraints */
+ if (intel_dp->dfp.pcon_max_frl_bw) {
+ int target_bw;
+ int max_frl_bw;
+ int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+
+ target_bw = bpp * target_clock;
+
+ max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+		/* converting bw from Gbps to Kbps */
+ max_frl_bw = max_frl_bw * 1000000;
+
+ if (target_bw > max_frl_bw)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+ }
+
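
The units work out because target_clock is in kHz, so bpp * target_clock is already in Kbps and only the advertised FRL rate needs scaling from Gbps. A worked example with hypothetical numbers:

	/* 3840x2160@60 (594000 kHz pixel clock) at a 24 bpp minimum
	 * output against a PCON advertising 24 Gbps of FRL bandwidth:
	 *   target_bw  = 24 * 594000    = 14,256,000 Kbps
	 *   max_frl_bw = 24 * 1,000,000 = 24,000,000 Kbps
	 * target_bw <= max_frl_bw, so the mode is accepted (MODE_OK),
	 * skipping the dotclock/TMDS checks that follow in the function.
	 */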
if (intel_dp->dfp.max_dotclock &&
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
@@ -833,1129 +863,6 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
-{
- int i;
- u32 v = 0;
-
- if (src_bytes > 4)
- src_bytes = 4;
- for (i = 0; i < src_bytes; i++)
- v |= ((u32)src[i]) << ((3 - i) * 8);
- return v;
-}
-
-static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
-{
- int i;
- if (dst_bytes > 4)
- dst_bytes = 4;
- for (i = 0; i < dst_bytes; i++)
- dst[i] = src >> ((3-i) * 8);
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd);
-static void
-intel_dp_pps_init(struct intel_dp *intel_dp);
-
-static intel_wakeref_t
-pps_lock(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * See intel_power_sequencer_reset() why we need
- * a power domain reference here.
- */
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
-
- mutex_lock(&dev_priv->pps_mutex);
-
- return wakeref;
-}
-
-static intel_wakeref_t
-pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)),
- wakeref);
- return 0;
-}
-
-#define with_pps_lock(dp, wf) \
- for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
-
-static void
-vlv_power_sequencer_kick(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps_pipe;
- bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
- enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- u32 DP;
-
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
- DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- DP |= DP_PORT_WIDTH(1);
- DP |= DP_LINK_TRAIN_PAT_1;
-
- if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SEL_CHV(pipe);
- else
- DP |= DP_PIPE_SEL(pipe);
-
- pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
-
- /*
- * The DPLL for the pipe must be enabled for this to work.
- * So enable temporarily it if it's not already enabled.
- */
- if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev_priv) &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
-
- if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
- return;
- }
- }
-
- /*
- * Similar magic as in intel_dp_enable_port().
- * We _must_ do this port enable + disable trick
- * to make this power sequencer lock onto the port.
- * Otherwise even VDD force bit won't work.
- */
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
-
- if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
- }
-}
-
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe !=
- intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps_pipe);
- } else {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->pps_pipe != INVALID_PIPE);
-
- if (intel_dp->active_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->active_pipe);
- }
- }
-
- if (pipes == 0)
- return INVALID_PIPE;
-
- return ffs(pipes) - 1;
-}
-
-static enum pipe
-vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- return intel_dp->pps_pipe;
-
- pipe = vlv_find_free_pps(dev_priv);
-
- /*
- * Didn't find one. This should not happen since there
- * are two power sequencers and up to two eDP ports.
- */
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
- pipe = PIPE_A;
-
- vlv_steal_power_sequencer(dev_priv, pipe);
- intel_dp->pps_pipe = pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-
- /*
- * Even vdd force doesn't work until we've made
- * the power sequencer lock in on the port.
- */
- vlv_power_sequencer_kick(intel_dp);
-
- return intel_dp->pps_pipe;
-}
-
-static int
-bxt_power_sequencer_idx(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- if (!intel_dp->pps_reset)
- return backlight_controller;
-
- intel_dp->pps_reset = false;
-
- /*
- * Only the HW needs to be reprogrammed, the SW state is fixed and
- * has been setup during connector init.
- */
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-
- return backlight_controller;
-}
-
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
-}
-
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
-}
-
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return true;
-}
-
-static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
-{
- enum pipe pipe;
-
- for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
- PANEL_PORT_SELECT_MASK;
-
- if (port_sel != PANEL_PORT_SELECT_VLV(port))
- continue;
-
- if (!pipe_check(dev_priv, pipe))
- continue;
-
- return pipe;
- }
-
- return INVALID_PIPE;
-}
-
-static void
-vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum port port = dig_port->base.port;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* try to find a pipe with this port selected */
- /* first pick one where the panel is on */
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
- /* didn't find one? pick one where vdd is on */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
- /* didn't find one? pick one with just the correct port */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
-
- /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
- if (intel_dp->pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- return;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
-
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-}
-
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !(IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_GEN9_LP(dev_priv))))
- return;
-
- /*
- * We can't grab pps_mutex here due to deadlock with power_domain
- * mutex when power_domain functions are called while holding pps_mutex.
- * That also means that in order to use pps_pipe the code needs to
- * hold both a power domain reference and pps_mutex, and the power domain
- * reference get/put must be done while _not_ holding pps_mutex.
- * pps_{lock,unlock}() do these steps in the correct order, so one
- * should use them always.
- */
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE);
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- if (IS_GEN9_LP(dev_priv))
- intel_dp->pps_reset = true;
- else
- intel_dp->pps_pipe = INVALID_PIPE;
- }
-}
-
-struct pps_registers {
- i915_reg_t pp_ctrl;
- i915_reg_t pp_stat;
- i915_reg_t pp_on;
- i915_reg_t pp_off;
- i915_reg_t pp_div;
-};
-
-static void intel_pps_get_registers(struct intel_dp *intel_dp,
- struct pps_registers *regs)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
-
- memset(regs, 0, sizeof(*regs));
-
- if (IS_GEN9_LP(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_idx = vlv_power_sequencer_pipe(intel_dp);
-
- regs->pp_ctrl = PP_CONTROL(pps_idx);
- regs->pp_stat = PP_STATUS(pps_idx);
- regs->pp_on = PP_ON_DELAYS(pps_idx);
- regs->pp_off = PP_OFF_DELAYS(pps_idx);
-
- /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- regs->pp_div = INVALID_MMIO_REG;
- else
- regs->pp_div = PP_DIVISOR(pps_idx);
-}
-
-static i915_reg_t
-_pp_ctrl_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_ctrl;
-}
-
-static i915_reg_t
-_pp_stat_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_stat;
-}
-
-static bool edp_have_panel_power(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
-}
-
-static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
- }
-}
-
-static u32
-intel_dp_aux_wait_done(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- const unsigned int timeout_ms = 10;
- u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
-
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (!done)
- drm_err(&i915->drm,
- "%s: did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
-#undef C
-
- return status;
-}
-
-static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2000 and use that
- */
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
-}
-
-static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 freq;
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the cdclk or PCH rawclk, and would
- * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
- * divide by 2000 and use that
- */
- if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
- else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
- return DIV_ROUND_CLOSEST(freq, 2000);
-}
-
-static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
- /* Workaround for non-ULT HSW */
- switch (index) {
- case 0: return 63;
- case 1: return 72;
- default: return 0;
- }
- }
-
- return ilk_get_aux_clock_divider(intel_dp, index);
-}
-
-static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- /*
- * SKL doesn't need us to program the AUX clock divider (Hardware will
- * derive the clock from CDCLK automatically). We still implement the
- * get_aux_clock_divider vfunc to plug-in into the existing code.
- */
- return index ? 0 : 1;
-}
-
-static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 aux_clock_divider)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- u32 precharge, timeout;
-
- if (IS_GEN(dev_priv, 6))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev_priv))
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
- return DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- timeout |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
-}
-
-static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 unused)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- u32 ret;
-
- ret = DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_MAX |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
-
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
- ret |= DP_AUX_CH_CTL_TBT_IO;
-
- return ret;
-}
-
-static int
-intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const u8 *send, int send_bytes,
- u8 *recv, int recv_size,
- u32 aux_send_ctl_flags)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
- i915_reg_t ch_ctl, ch_data[5];
- u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain;
- intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
- int i, ret, recv_bytes;
- int try, clock = 0;
- u32 status;
- bool vdd;
-
- ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- for (i = 0; i < ARRAY_SIZE(ch_data); i++)
- ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
-
- if (is_tc_port)
- intel_tc_port_lock(dig_port);
-
- aux_domain = intel_aux_power_domain(dig_port);
-
- aux_wakeref = intel_display_power_get(i915, aux_domain);
- pps_wakeref = pps_lock(intel_dp);
-
- /*
- * We will be called with VDD already enabled for dpcd/edid/oui reads.
- * In such cases we want to leave VDD enabled and it's up to upper layers
- * to turn it off. But for eg. i2c-dev access we need to turn it on/off
- * ourselves.
- */
- vdd = edp_panel_vdd_on(intel_dp);
-
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
-
- /* Try to wait for any previous AUX channel activity */
- for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- msleep(1);
- }
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
-
- if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
- "%s: not started (status 0x%08x)\n",
- intel_dp->aux.name, status);
- intel_dp->aux_busy_last_status = status;
- }
-
- ret = -EBUSY;
- goto out;
- }
-
- /* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
- ret = -E2BIG;
- goto out;
- }
-
- while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- send_bytes,
- aux_clock_divider);
-
- send_ctl |= aux_send_ctl_flags;
-
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
-
- /* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
-
- status = intel_dp_aux_wait_done(intel_dp);
-
- /* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
- * 400us delay required for errors and timeouts
- * Timeout errors from the HW already meet this
- * requirement, so skip to the next iteration
- */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- usleep_range(400, 500);
- continue;
- }
- if (status & DP_AUX_CH_CTL_DONE)
- goto done;
- }
- }
-
- if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EBUSY;
- goto out;
- }
-
-done:
- /* Check for timeout or receive error.
- * Timeouts occur when the sink is not connected
- */
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EIO;
- goto out;
- }
-
- /* Timeouts occur when the device isn't connected, so they're
- * "normal" -- don't fill the kernel log with these */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- /* Unload any bytes sent back from the other side */
- recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
- DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
- /*
- * By BSpec: "Message sizes of 0 or >20 are not allowed."
- * We have no idea what happened, so we return -EBUSY and let
- * the drm layer take care of the necessary retries.
- */
- if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
- "%s: Forbidden recv_bytes = %d on aux transaction\n",
- intel_dp->aux.name, recv_bytes);
- ret = -EBUSY;
- goto out;
- }
-
- if (recv_bytes > recv_size)
- recv_bytes = recv_size;
-
- for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
- recv + i, recv_bytes - i);
-
- ret = recv_bytes;
-out:
- cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-
- if (vdd)
- edp_panel_vdd_off(intel_dp, false);
-
- pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
-
- return ret;
-}
-
-#define BARE_ADDRESS_SIZE 3
-#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
-
-static void
-intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
- const struct drm_dp_aux_msg *msg)
-{
- txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
-}
-
-static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
-{
- /*
- * If we're trying to send the HDCP Aksv, we need to set the Aksv
- * select bit to inform the hardware to send the Aksv after our header
- * since we can't access that data from software.
- */
- if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
- msg->address == DP_AUX_HDCP_AKSV)
- return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
-
- return 0;
-}
-
-static ssize_t
-intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 txbuf[20], rxbuf[20];
- size_t txsize, rxsize;
- u32 flags = intel_dp_aux_xfer_flags(msg);
- int ret;
-
- intel_dp_aux_header(txbuf, msg);
-
- switch (msg->request & ~DP_AUX_I2C_MOT) {
- case DP_AUX_NATIVE_WRITE:
- case DP_AUX_I2C_WRITE:
- case DP_AUX_I2C_WRITE_STATUS_UPDATE:
- txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 2; /* 0 or 1 data bytes */
-
- if (drm_WARN_ON(&i915->drm, txsize > 20))
- return -E2BIG;
-
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
-
- if (msg->buffer)
- memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
-
- if (ret > 1) {
- /* Number of bytes written in a short write. */
- ret = clamp_t(int, rxbuf[1], 0, msg->size);
- } else {
- /* Return payload size. */
- ret = msg->size;
- }
- }
- break;
-
- case DP_AUX_NATIVE_READ:
- case DP_AUX_I2C_READ:
- txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
- rxsize = msg->size + 1;
-
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
- return -E2BIG;
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
- /*
- * Assume happy day, and copy the data. The caller is
- * expected to check msg->reply before touching it.
- *
- * Return payload size.
- */
- ret--;
- memcpy(msg->buffer, rxbuf + 1, ret);
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_B);
- }
-}
-
-static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_B, index);
- }
-}
-
-static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_CTL(aux_ch);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_DATA(aux_ch, index);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static void
-intel_dp_aux_fini(struct intel_dp *intel_dp)
-{
- if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
- cpu_latency_qos_remove_request(&intel_dp->pm_qos);
-
- kfree(intel_dp->aux.name);
-}
-
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else {
- intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
- }
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
- drm_dp_aux_init(&intel_dp->aux);
-
- /* Failure to allocate our preferred name is not critical */
- if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
- aux_ch - AUX_CH_USBC1 + '1',
- encoder->base.name);
- else
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
- aux_ch_name(aux_ch),
- encoder->base.name);
-
- intel_dp->aux.transfer = intel_dp_aux_transfer;
- cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
@@ -2267,6 +1174,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static int
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
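+ /* Each pipe bpp step is 2 bits per component x 3 components */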
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ output_bpp);
+
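+ /* DP supports lane counts of 1, 2 and 4 only, hence the power-of-two step */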
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -2293,6 +1238,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
u8 line_buf_depth;
int ret;
+ /*
+ * RC_MODEL_SIZE is currently a constant across all configurations.
+ *
+ * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -2482,13 +1435,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp->use_max_params) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
+ * advertises being capable of in case the initial fast
+ * optimal params failed us. The panels are generally
* designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * configuration, and typically on older panels these
+ * values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2507,11 +1461,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we fall back to the max clock and lane count for eDP
+ * panels that fail with the fast optimal settings (see
+ * intel_dp->use_max_params), in which case the fast vs. wide
+ * choice doesn't matter.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2760,6 +1725,9 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
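+ /* VRR and DRRS are mutually exclusive; bail when VRR is in use */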
+ if (pipe_config->vrr.enable)
+ return;
+
/*
* DRRS and PSR can't be enable together, so giving preference to PSR
* as it allows more power-savings by complete shutting down display,
@@ -2792,8 +1760,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
@@ -2863,6 +1830,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config);
intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
constant_n);
@@ -2963,427 +1931,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
}
}
-#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
-#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-
-#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void intel_pps_verify_state(struct intel_dp *intel_dp);
-
-static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_verify_state(intel_dp);
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
- mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
- drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
-}
-
-static void wait_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
- wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void wait_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
- wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
-
- /* take the difference of current time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
- panel_power_on_time = ktime_get_boottime();
- panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
-
- /* When we disable the VDD override bit last, we have to do the manual
- * wait. */
- if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->panel_power_cycle_delay - panel_power_off_duration);
-
- wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-static void wait_backlight_on(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
- intel_dp->backlight_on_delay);
-}
-
-static void edp_wait_backlight_off(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
- intel_dp->backlight_off_delay);
-}
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 control;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
- }
- return control;
-}
-
-/*
- * Must be paired with edp_panel_vdd_off().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
- bool need_to_disable = !intel_dp->want_panel_vdd;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return false;
-
- cancel_delayed_work(&intel_dp->panel_vdd_work);
- intel_dp->want_panel_vdd = true;
-
- if (edp_have_panel_vdd(intel_dp))
- return need_to_disable;
-
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dig_port));
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- if (!edp_have_panel_power(intel_dp))
- wait_panel_power_cycle(intel_dp);
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_FORCE_VDD;
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
- /*
- * If the panel wasn't on, delay before accessing aux channel
- */
- if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- msleep(intel_dp->panel_power_up_delay);
- }
-
- return need_to_disable;
-}
-
-/*
- * Must be paired with intel_edp_panel_vdd_off() or
- * intel_edp_panel_off().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool vdd;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- vdd = false;
- with_pps_lock(intel_dp, wakeref)
- vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-}
-
-static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_FORCE_VDD;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp_stat_reg = _pp_stat_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- /* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if ((pp & PANEL_POWER_ON) == 0)
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(dig_port));
-}
-
-static void edp_panel_vdd_work(struct work_struct *__work)
-{
- struct intel_dp *intel_dp =
- container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
- intel_wakeref_t wakeref;
-
- with_pps_lock(intel_dp, wakeref) {
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- }
-}
-
-static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
-{
- unsigned long delay;
-
- /*
- * Queue the timer to fire a long time from now (relative to the power
- * down delay) to keep the panel power up across a sequence of
- * operations.
- */
- delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
-}
-
-/*
- * Must be paired with edp_panel_vdd_on().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- intel_dp->want_panel_vdd = false;
-
- if (sync)
- edp_panel_vdd_off_sync(intel_dp);
- else
- edp_panel_vdd_schedule_off(intel_dp);
-}
-
-static void edp_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
- return;
-
- wait_panel_power_cycle(intel_dp);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ilk_get_pp_control(intel_dp);
- if (IS_GEN(dev_priv, 5)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- pp |= PANEL_POWER_ON;
- if (!IS_GEN(dev_priv, 5))
- pp |= PANEL_POWER_RESET;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_on(intel_dp);
- intel_dp->last_power_on = jiffies;
-
- if (IS_GEN(dev_priv, 5)) {
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_on(intel_dp);
-}
-
-
-static void edp_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- /* We need to switch off panel power _and_ force vdd, for otherwise some
- * panels get very unhappy and cease to work. */
- pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
- EDP_BLC_ENABLE);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_dp->want_panel_vdd = false;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_off(intel_dp);
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- /* We got a reference when we enabled the VDD. */
- intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
-}
-
-void intel_edp_panel_off(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_off(intel_dp);
-}
-
-/* Enable backlight in the panel power control. */
-static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * If we enable the backlight right away following a panel power
- * on, we may see slight flicker as the panel syncs with the eDP
- * link. So delay a bit to make sure the image is solid before
- * allowing it to appear.
- */
- wait_backlight_on(intel_dp);
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -3398,31 +1945,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
- _intel_edp_backlight_on(intel_dp);
-}
-
-/* Disable backlight in the panel power control. */
-static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- intel_dp->last_backlight_off = jiffies;
- edp_wait_backlight_off(intel_dp);
+ intel_pps_backlight_on(intel_dp);
}
/* Disable backlight PP control and backlight PWM. */
@@ -3436,37 +1959,10 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
- _intel_edp_backlight_off(intel_dp);
+ intel_pps_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
}
-/*
- * Hook for controlling the panel power control backlight through the bl_power
- * sysfs attribute. Take care to handle multiple calls.
- */
-static void intel_edp_backlight_power(struct intel_connector *connector,
- bool enable)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- is_enabled = false;
- with_pps_lock(intel_dp, wakeref)
- is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- if (is_enabled == enable)
- return;
-
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
- enable ? "enable" : "disable");
-
- if (enable)
- _intel_edp_backlight_on(intel_dp);
- else
- _intel_edp_backlight_off(intel_dp);
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -3583,6 +2079,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
enable ? "enable" : "disable");
}
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
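+ /* Intel's 3-byte source OUI, written to the sink's DP_SOURCE_OUI register */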
+ u8 oui[] = { 0x00, 0xaa, 0x01 };
+ u8 buf[3] = { 0 };
+
+ /*
+ * During driver init, we want to be careful and avoid changing the source OUI if it's
+ * already set to what we want, so that we don't clear any sink state by accident
+ */
+ if (careful) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_err(&i915->drm, "Failed to read source OUI\n");
+
+ if (memcmp(oui, buf, sizeof(oui)) == 0)
+ return;
+ }
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+ drm_err(&i915->drm, "Failed to write source OUI\n");
+}
+
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
@@ -3604,6 +2123,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
lspcon_resume(dp_to_dig_port(intel_dp));
+ /* Write the source OUI as early as possible */
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_init_source_oui(intel_dp, false);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -3860,10 +2383,12 @@ static void intel_disable_dp(struct intel_atomic_state *state,
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- intel_edp_panel_vdd_on(intel_dp);
+ intel_pps_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- intel_edp_panel_off(intel_dp);
+ intel_pps_off(intel_dp);
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
}
static void g4x_disable_dp(struct intel_atomic_state *state,
@@ -3959,6 +2484,280 @@ cpt_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* Clear the cached register set to avoid using stale values */
+
+ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+ intel_dp->pcon_dsc_dpcd,
+ sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+ drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_PCON_DSC_ENCODER);
+
+ drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
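+ /* HDMI 2.1 FRL rates in Gbps, index-matched to the DPCD FRL bandwidth mask bits */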
+ int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ int i;
+
+ for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+ if (frl_bw_mask & (1 << i))
+ return bw_gbps[i];
+ }
+ return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+ switch (max_frl) {
+ case 48:
+ return DP_PCON_FRL_BW_MASK_48GBPS;
+ case 40:
+ return DP_PCON_FRL_BW_MASK_40GBPS;
+ case 32:
+ return DP_PCON_FRL_BW_MASK_32GBPS;
+ case 24:
+ return DP_PCON_FRL_BW_MASK_24GBPS;
+ case 18:
+ return DP_PCON_FRL_BW_MASK_18GBPS;
+ case 9:
+ return DP_PCON_FRL_BW_MASK_9GBPS;
+ }
+
+ return 0;
+}
+
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int max_frl_rate;
+ int max_lanes, rate_per_lane;
+ int max_dsc_lanes, dsc_rate_per_lane;
+
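+ /* Max FRL bandwidth = lanes x per-lane rate, further capped by DSC limits below */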
+ max_lanes = connector->display_info.hdmi.max_lanes;
+ rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_frl_rate = max_lanes * rate_per_lane;
+
+ if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (max_dsc_lanes && dsc_rate_per_lane)
+ max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+ }
+
+ return max_frl_rate;
+}
+
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
+#define PCON_CONCURRENT_MODE (1 > 0)
+#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
+#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+ u8 max_frl_bw_mask = 0, frl_trained_mask;
+ bool is_active;
+
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+
+ max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+ drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+ max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+ drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+ max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+ if (max_frl_bw <= 0)
+ return -EINVAL;
+
+ ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+ if (ret < 0)
+ return ret;
+ /* Wait for PCON to be FRL Ready */
+ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+ /*
+ * Wait for FRL training to complete and
+ * check that the HDMI link is up and active.
+ */
+ wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ /* Verify HDMI Link configuration shows FRL Mode */
+ if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
+ DP_PCON_HDMI_MODE_FRL) {
+ drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
+ return -EINVAL;
+ }
+ drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
+
+ intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+ intel_dp->frl.is_trained = true;
+ drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+ return 0;
+}
+
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+ if (drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->has_hdmi_sink &&
+ intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+ return true;
+
+ return false;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* Always go for FRL training if supported */
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ intel_dp->frl.is_trained)
+ return;
+
+ if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+ int ret, mode;
+
+ drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+ if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+ drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ } else {
+ drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ }
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+ int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+ return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+ int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+ return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+ pcon_max_slice_width,
+ hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int num_slices, int slice_width)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int output_format = crtc_state->output_format;
+ bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+ int hdmi_max_chunk_bytes =
+ connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+ return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+ num_slices, output_format, hdmi_all_bpp,
+ hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 pps_param[6];
+ int slice_height;
+ int slice_width;
+ int num_slices;
+ int bits_per_pixel;
+ int ret;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector;
+ bool hdmi_is_dsc_1_2;
+
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+ return;
+
+ if (!intel_connector)
+ return;
+ connector = &intel_connector->base;
+ hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+ !hdmi_is_dsc_1_2)
+ return;
+
+ slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+ if (!slice_height)
+ return;
+
+ num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+ if (!num_slices)
+ return;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+ num_slices);
+
+ bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+ num_slices, slice_width);
+ if (!bits_per_pixel)
+ return;
+
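+ /* Pack PPS overrides little-endian: 16-bit slice height/width, 10-bit bpp */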
+ pps_param[0] = slice_height & 0xFF;
+ pps_param[1] = slice_height >> 8;
+ pps_param[2] = slice_width & 0xFF;
+ pps_param[3] = slice_width >> 8;
+ pps_param[4] = bits_per_pixel & 0xFF;
+ pps_param[5] = (bits_per_pixel >> 8) & 0x3;
+
+ ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
+
static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -4044,12 +2843,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
tmp = 0;
+ if (intel_dp->dfp.rgb_to_ycbcr) {
+ bool bt2020, bt709;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+ /*
+ * FIXME: Currently if userspace selects BT2020 or BT709, but the PCON
+ * supports RGB->YCbCr conversion only for the BT601 colorspace, we fall
+ * back to BT601 as the default.
+ */
+ tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
+
+ bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+ bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+ switch (crtc_state->infoframes.vsc.colorimetry) {
+ case DP_COLORIMETRY_BT2020_RGB:
+ case DP_COLORIMETRY_BT2020_YCC:
+ if (bt2020)
+ tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
+ break;
+ case DP_COLORIMETRY_BT709_YCC:
+ case DP_COLORIMETRY_XVYCC_709:
+ if (bt709)
+ tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
- enableddisabled(false));
+ "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
+ enableddisabled(tmp));
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -4067,15 +2896,15 @@ static void intel_enable_dp(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
- with_pps_lock(intel_dp, wakeref) {
+ with_intel_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ vlv_pps_init(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -4090,6 +2919,8 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -4132,112 +2963,6 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
ilk_edp_pll_on(intel_dp, pipe_config);
}
-static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum pipe pipe = intel_dp->pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return;
-
- edp_panel_vdd_off_sync(intel_dp);
-
- /*
- * VLV seems to get confused when multiple power sequencers
- * have the same port selected (even if only one has power/vdd
- * enabled). The failure manifests as vlv_wait_port_ready() failing.
- * CHV, on the other hand, doesn't seem to mind having the same port
- * selected in multiple power sequencers, but let's clear the
- * port select always when logically disconnecting a power sequencer
- * from a port.
- */
- drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
-
- intel_dp->pps_pipe = INVALID_PIPE;
-}
-
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_encoder *encoder;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- if (intel_dp->pps_pipe != pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* make sure vdd is off before we steal it */
- vlv_detach_power_sequencer(intel_dp);
- }
-}
-
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (intel_dp->pps_pipe != INVALID_PIPE &&
- intel_dp->pps_pipe != crtc->pipe) {
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- vlv_detach_power_sequencer(intel_dp);
- }
-
- /*
- * We may be stealing the power
- * sequencer from another port.
- */
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
-
- intel_dp->active_pipe = crtc->pipe;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /* now it's all ours */
- intel_dp->pps_pipe = crtc->pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-}
-
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -4637,18 +3362,35 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+static char dp_training_pattern_name(u8 train_pat)
+{
+ switch (train_pat) {
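+ /* DP_TRAINING_PATTERN_{1,2,3} are the values 1..3, so they map directly to ASCII */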
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
+}
+
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
- DP_TRAINING_PATTERN_DISABLE)
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- intel_dp_training_pattern_symbol(dp_train_pat));
+ "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
@@ -4714,15 +3456,15 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- msleep(intel_dp->panel_power_down_delay);
+ msleep(intel_dp->pps.panel_power_down_delay);
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
- with_pps_lock(intel_dp, wakeref)
- intel_dp->active_pipe = INVALID_PIPE;
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
}
}
@@ -4852,6 +3594,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
+ /*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
+ intel_edp_init_source_oui(intel_dp, true);
+
return true;
}
@@ -5758,6 +4506,17 @@ update_status:
"Could not write test response to sink\n");
}
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+{
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+
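+ /* CP_IRQ in the ESI vector signals an HDCP event on the MST link */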
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ *handled = true;
+ }
+}
+
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
* @intel_dp: Intel DP struct
@@ -5802,7 +4561,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
+
if (!handled)
break;
@@ -5820,6 +4580,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return link_ok;
}
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+ bool is_active;
+ u8 buf = 0;
+
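+ /* If FRL was trained but the HDMI link has since dropped, tear it down and retrain */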
+ is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+ if (intel_dp->frl.is_trained && !is_active) {
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+ return;
+
+ buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+ return;
+
+ drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+
+ /* Restart FRL training or fall back to TMDS mode */
+ intel_dp_check_frl_training(intel_dp);
+ }
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -5993,6 +4775,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
!intel_dp_mst_is_master_trans(crtc_state))
continue;
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp, crtc_state);
intel_dp_stop_link_train(intel_dp, crtc_state);
break;
@@ -6102,7 +4886,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
return 0;
}
-static void intel_dp_phy_test(struct intel_encoder *encoder)
+void intel_dp_phy_test(struct intel_encoder *encoder)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -6184,7 +4968,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
return state;
}
-static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
@@ -6208,6 +4992,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+ drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+ return;
+ }
+
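+ /* Write the handled bits back to ack the IRQ in the sink */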
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+ drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+ return;
+ }
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -6247,7 +5055,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
+ intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -6467,13 +5276,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
intel_dp->downstream_ports,
edid);
+ intel_dp->dfp.pcon_max_frl_bw =
+ drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
intel_dp->dfp.max_dotclock,
intel_dp->dfp.min_tmds_clock,
- intel_dp->dfp.max_tmds_clock);
+ intel_dp->dfp.max_tmds_clock,
+ intel_dp->dfp.pcon_max_frl_bw);
+
+ intel_dp_get_pcon_dsc_cap(intel_dp);
}
static void
@@ -6481,7 +5297,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
/* No YCbCr output support on gmch platforms */
if (HAS_GMCH(i915))
@@ -6503,14 +5319,26 @@ intel_dp_update_420(struct intel_dp *intel_dp)
dp_to_dig_port(intel_dp)->lspcon.active ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
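+ /* Any of the BT601/BT709/BT2020 RGB->YCbCr conversion caps will do */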
+ rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
if (INTEL_GEN(i915) >= 11) {
+ /* Let PCON convert from RGB->YCbCr if possible */
+ if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+ intel_dp->dfp.rgb_to_ycbcr = true;
+ intel_dp->dfp.ycbcr_444_to_420 = true;
+ connector->base.ycbcr_420_allowed = true;
+ } else {
/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
- intel_dp->dfp.ycbcr_444_to_420 =
- ycbcr_444_to_420 && !ycbcr_420_passthrough;
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
- connector->base.ycbcr_420_allowed =
- !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ }
} else {
/* 4:4:4->4:2:0 conversion is the only way */
intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
@@ -6519,8 +5347,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
}
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
+ yesno(intel_dp->dfp.rgb_to_ycbcr),
yesno(connector->base.ycbcr_420_allowed),
yesno(intel_dp->dfp.ycbcr_444_to_420));
}
@@ -6544,7 +5373,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
}
drm_dp_cec_set_edid(&intel_dp->aux, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -6558,13 +5386,14 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_hdmi_sink = false;
intel_dp->has_audio = false;
- intel_dp->edid_quirks = 0;
intel_dp->dfp.max_bpc = 0;
intel_dp->dfp.max_dotclock = 0;
intel_dp->dfp.min_tmds_clock = 0;
intel_dp->dfp.max_tmds_clock = 0;
+ intel_dp->dfp.pcon_max_frl_bw = 0;
+
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
}
@@ -6670,7 +5499,7 @@ intel_dp_detect(struct drm_connector *connector,
to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
@@ -6723,6 +5552,10 @@ static int intel_dp_get_modes(struct drm_connector *connector)
edid = intel_connector->detect_edid;
if (edid) {
int ret = intel_connector_update_modes(connector, edid);
+
+ if (intel_vrr_is_capable(connector))
+ drm_connector_set_vrr_capable_property(connector,
+ true);
if (ret)
return ret;
}
@@ -6761,6 +5594,8 @@ intel_dp_connector_register(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
int ret;
ret = intel_connector_register(connector);
@@ -6774,6 +5609,22 @@ intel_dp_connector_register(struct drm_connector *connector)
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return ret;
+
+ /*
+ * TODO: Clean this up to handle lspcon init and resume in a more
+ * efficient and streamlined way.
+ */
+ if (lspcon_init(dig_port)) {
+ lspcon_detect_hdr_capability(lspcon);
+ if (lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+ }
+
return ret;
}
@@ -6793,17 +5644,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &dig_port->dp;
intel_dp_mst_encoder_cleanup(dig_port);
- if (intel_dp_is_edp(intel_dp)) {
- intel_wakeref_t wakeref;
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
- }
+ intel_pps_vdd_off_sync(intel_dp);
intel_dp_aux_fini(intel_dp);
}
@@ -6819,53 +5661,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- wait_panel_power_cycle(intel_dp);
-}
-
-static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
+ intel_pps_wait_power_cycle(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
@@ -6885,30 +5689,20 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->reset_link_params = true;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
- if (intel_dp_is_edp(intel_dp)) {
- /*
- * Reinit the power sequencer, in case BIOS did
- * something nasty with it.
- */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
}
+
+ intel_pps_encoder_reset(intel_dp);
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
@@ -7073,19 +5867,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static bool intel_edp_have_power(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool have_power = false;
-
- with_pps_lock(intel_dp, wakeref) {
- have_power = edp_have_panel_power(intel_dp) &&
- edp_have_panel_vdd(intel_dp);
- }
-
- return have_power;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
@@ -7093,7 +5874,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
struct intel_dp *intel_dp = &dig_port->dp;
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
- (long_hpd || !intel_edp_have_power(intel_dp))) {
+ (long_hpd || !intel_pps_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
@@ -7162,7 +5943,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
- intel_attach_colorspace_property(connector);
+ /* Register HDMI colorspace for case of lspcon */
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(connector);
+ } else {
+ intel_attach_dp_colorspace_property(connector);
+ }
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
drm_object_attach_property(&connector->base,
@@ -7181,277 +5968,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
-}
-
-static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
-{
- intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_dp->last_power_on = jiffies;
- intel_dp->last_backlight_off = jiffies;
-}
-
-static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- pp_ctl = ilk_get_pp_control(intel_dp);
-
- /* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
-
- /* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
-
- if (i915_mmio_reg_valid(regs.pp_div)) {
- u32 pp_div;
-
- pp_div = intel_de_read(dev_priv, regs.pp_div);
-
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
- } else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
- }
-}
-
-static void
-intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
-{
- DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
-}
-static void
-intel_pps_verify_state(struct intel_dp *intel_dp)
-{
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps_delays;
-
- intel_pps_readout_hw_state(intel_dp, &hw);
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- DRM_ERROR("PPS state mismatch\n");
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- intel_pps_readout_hw_state(intel_dp, &cur);
-
- intel_pps_dump_state("cur", &cur);
-
- vbt = dev_priv->vbt.edp.pps;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Ocassionally the panel
- * just fails to power back on. Increasing the delay to 800ms
- * seems sufficient to avoid this problem.
- */
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
- "Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
- }
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt.t11_t12 += 100 * 10;
-
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec.t11_t12 = (510 + 100) * 10;
-
- intel_pps_dump_state("vbt", &vbt);
-
- /* Use the max of the register settings and vbt. If both are
- * unset, fall back to the spec limits. */
-#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
- spec.field : \
- max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay,
- intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay,
- intel_dp->backlight_off_delay);
-
- /*
- * We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
- */
- final->t8 = 1;
- final->t9 = 1;
-
- /*
- * HW has only a 100msec granularity for t11_t12 so round it up
- * accordingly.
- */
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
-}
-
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
- struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_get_registers(intel_dp, &regs);
-
- /*
- * On some VLV machines the BIOS can leave the VDD
- * enabled even on power sequencers which aren't
- * hooked up to any port. This would mess up the
- * power domain tracking the first time we pick
- * one of these power sequencers for use since
- * edp_panel_vdd_on() would notice that the VDD was
- * already on and therefore wouldn't grab the power
- * domain reference. Disable VDD first to avoid this.
- * This also avoids spuriously turning the VDD on as
- * soon as the new power sequencer gets initialized.
- */
- if (force_disable_vdd) {
- u32 pp = ilk_get_pp_control(intel_dp);
-
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
- "Panel power already on\n");
-
- if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
- "VDD already on, disabling first\n");
-
- pp &= ~EDP_FORCE_VDD;
-
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
- }
-
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
-
- /* Haswell doesn't have any port selection bits for the panel
- * power sequencer any more. */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- switch (port) {
- case PORT_A:
- port_sel = PANEL_PORT_SELECT_DPA;
- break;
- case PORT_C:
- port_sel = PANEL_PORT_SELECT_DPC;
- break;
- case PORT_D:
- port_sel = PANEL_PORT_SELECT_DPD;
- break;
- default:
- MISSING_CASE(port);
- break;
- }
- }
-
- pp_on |= port_sel;
-
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
-
- /*
- * Compute the divisor for the pp clock, simply match the Bspec formula.
- */
- if (i915_mmio_reg_valid(regs.pp_div)) {
- intel_de_write(dev_priv, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
-}
-
-static void intel_dp_pps_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- }
+ if (HAS_VRR(dev_priv))
+ drm_connector_attach_vrr_capable_property(connector);
}
/**
@@ -7890,14 +6409,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
- intel_wakeref_t wakeref;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
-
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -7913,11 +6429,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- with_pps_lock(intel_dp, wakeref) {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ intel_pps_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -7934,7 +6446,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_connector_update_edid_property(connector, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7962,7 +6473,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps_pipe;
+ pipe = intel_dp->pps.pps_pipe;
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
@@ -7973,7 +6484,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_connector->panel.backlight.power = intel_edp_backlight_power;
+ intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
@@ -7985,13 +6496,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return true;
out_vdd_off:
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
return false;
}
@@ -8045,8 +6550,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp->reset_link_params = true;
- intel_dp->pps_pipe = INVALID_PIPE;
- intel_dp->active_pipe = INVALID_PIPE;
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
@@ -8064,7 +6569,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@ -8133,6 +6638,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
(temp & ~0xf) | 0xd);
}
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
+
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 6620f9efdcbb..d80839139bfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -70,16 +70,11 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -141,5 +136,11 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 000000000000..eaebf123310a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
+{
+ int i;
+ u32 v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((u32)src[i]) << ((3 - i) * 8);
+ return v;
+}
+
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
+{
+ int i;
+
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3 - i) * 8);
+}
+
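To make the AUX byte ordering concrete, here is an illustrative round trip (not part of the patch; the helper name is hypothetical). The data registers hold bytes big-endian, most significant byte first:

static void sketch_pack_aux_example(void)
{
    const u8 src[2] = { 0x12, 0x34 };
    u8 dst[2];
    u32 v = intel_dp_pack_aux(src, 2);  /* v == 0x12340000 */

    intel_dp_unpack_aux(v, dst, 2);     /* dst == { 0x12, 0x34 } */
}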
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(timeout_ms));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (!done)
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+ return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the hrawclk, and we aim to run the AUX
+ * channel at 2MHz. So take the hrawclk value, divide by 2000 and use that
+ */
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
+
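As a worked example of the arithmetic (clock value hypothetical): rawclk_freq is stored in kHz, so a 100 MHz hrawclk yields a divider of 50, and 100 MHz / 50 hits the 2 MHz target described in the comment above:

static u32 sketch_g4x_divider_example(void)
{
    const u32 rawclk_freq_khz = 100000; /* hypothetical 100 MHz hrawclk */

    return DIV_ROUND_CLOSEST(rawclk_freq_khz, 2000); /* == 50 */
}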
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the cdclk or PCH rawclk, and we aim to
+ * run the AUX channel at 2MHz. So take the cdclk or PCH rawclk value,
+ * divide by 2000 and use that
+ */
+ if (dig_port->aux_ch == AUX_CH_A)
+ freq = dev_priv->cdclk.hw.cdclk;
+ else
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
+}
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ }
+
+ return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (Hardware will
+ * derive the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+ u32 precharge, timeout;
+
+ if (IS_GEN(dev_priv, 6))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev_priv))
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ u32 ret;
+
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_phy_is_tc(i915, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u32 aux_send_ctl_flags)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
+ i915_reg_t ch_ctl, ch_data[5];
+ u32 aux_clock_divider;
+ enum intel_display_power_domain aux_domain;
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
+ int i, ret, recv_bytes;
+ int try, clock = 0;
+ u32 status;
+ bool vdd;
+
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+ if (is_tc_port)
+ intel_tc_port_lock(dig_port);
+
+ aux_domain = intel_aux_power_domain(dig_port);
+
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * We will be called with VDD already enabled for dpcd/edid/oui reads.
+ * In such cases we want to leave VDD enabled and it's up to upper layers
+ * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
+ * ourselves.
+ */
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+ /*
+ * dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency to prevent the cpu from going into
+ * deep sleep states.
+ */
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+ intel_pps_check_power_unlocked(intel_dp);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (try == 3) {
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+ if (status != intel_dp->aux_busy_last_status) {
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ intel_dp->aux_busy_last_status = status;
+ }
+
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp);
+
+ /* Clear done status and any errors */
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /*
+ * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+ * 400us delay required for errors and timeouts
+ * Timeout errors from the HW already meet this
+ * requirement so skip to next iteration
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
+ continue;
+ }
+ if (status & DP_AUX_CH_CTL_DONE)
+ goto done;
+ }
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+done:
+ /*
+ * Check for timeout or receive error. Timeouts occur when the sink is
+ * not connected.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Timeouts occur when the device isn't connected, so they're "normal"
+ * -- don't fill the kernel log with these
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ /*
+ * By BSpec: "Message sizes of 0 or >20 are not allowed."
+ * We have no idea what happened, so we return -EBUSY and let the
+ * drm layer take care of the necessary retries.
+ */
+ if (recv_bytes == 0 || recv_bytes > 20) {
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ if (vdd)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+
+ intel_pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+ if (is_tc_port)
+ intel_tc_port_unlock(dig_port);
+
+ return ret;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
+
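For a concrete picture of the header layout, consider a native read of two bytes at DPCD address 0x202; the sketch below (illustrative only, hypothetical name) produces { 0x90, 0x02, 0x02, 0x01 }: request nibble plus address bits 19:16, address bits 15:8, address bits 7:0, and size - 1:

static void sketch_aux_header_example(void)
{
    const struct drm_dp_aux_msg msg = {
        .request = DP_AUX_NATIVE_READ, /* 0x9 */
        .address = 0x202,
        .size = 2,
    };
    u8 txbuf[HEADER_SIZE];

    intel_dp_aux_header(txbuf, &msg); /* { 0x90, 0x02, 0x02, 0x01 } */
}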
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
+ int ret;
+
+ intel_dp_aux_header(txbuf, msg);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 2; /* 0 or 1 data bytes */
+
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
+ return -E2BIG;
+
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ }
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+ kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+ drm_dp_aux_init(&intel_dp->aux);
+
+ /* Failure to allocate our preferred name is not critical */
+ if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+ aux_ch - AUX_CH_USBC1 + '1',
+ encoder->base.name);
+ else
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch),
+ encoder->base.name);
+
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 000000000000..4afbe76217b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+#include <linux/types.h>
+
+struct intel_dp;
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 51d27fc98d48..651884390137 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,10 +22,253 @@
*
*/
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing, a lot of laptops
+ * advertise support for the standard VESA backlight interface when they
+ * don't properly support it. However, on these systems the Intel backlight
+ * interface generally does work properly. Additionally, these systems will
+ * usually just indicate that they use PWM backlight controls in their VBIOS
+ * for some reason.
+ */
+
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
+#include "intel_panel.h"
+
+/* TODO:
+ * Implement HDR. Right now we just implement the bare minimum needed to
+ * bring us back into SDR mode, so people's backlights keep working in the
+ * meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0 0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1 0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2 0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3 0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358
+# define INTEL_EDP_TCON_USAGE_MASK GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3
+# define INTEL_EDP_TCON_POWER_MASK BIT(4)
+# define INTEL_EDP_TCON_POWER_DC (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+ u8 tcon_cap[4];
+
+ ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+ if (ret < 0)
+ return false;
+
+ if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+ return false;
+
+ if (tcon_cap[0] >= 1) {
+ drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ } else {
+ drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ return false;
+ }
+
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+ return true;
+}
+
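To illustrate what a successful capability probe looks like, here is a sketch decoding a hypothetical DPCD read (byte values invented for illustration):

static bool sketch_tcon_cap_example(void)
{
    /* CAP0 = interface version 1, CAP1 bit 4 = brightness in nits,
     * CAP2 bit 0 = SDR brightness over AUX (sdr_uses_aux). */
    const u8 tcon_cap[4] = { 0x01, 0x10, 0x01, 0x00 };

    return tcon_cap[0] >= 1 &&
           (tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP) &&
           (tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP);
}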
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 tmp;
+ u8 buf[2] = { 0 };
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ return 0;
+ }
+
+ if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
+ u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+ return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ }
+
+ /* Assume 100% brightness if backlight controls aren't enabled yet */
+ return panel->backlight.max;
+ }
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) {
+ drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ return 0;
+ }
+
+ return (buf[1] << 8 | buf[0]);
+}
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[4] = { 0 };
+
+ buf[0] = level & 0xFF;
+ buf[1] = (level & 0xFF00) >> 8;
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0)
+ drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
+
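The two brightness registers form a 16-bit little-endian value in nits; an illustrative round trip (level value hypothetical):

static u32 sketch_nits_roundtrip_example(void)
{
    const u32 level = 300; /* hypothetical 300 nits == 0x012c */
    u8 buf[2];

    buf[0] = level & 0xff;          /* 0x2c -> INTEL_EDP_BRIGHTNESS_NITS_LSB */
    buf[1] = (level & 0xff00) >> 8; /* 0x01 -> INTEL_EDP_BRIGHTNESS_NITS_MSB */

    return buf[1] << 8 | buf[0];    /* == 300, as the getter reconstructs it */
}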
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ intel_panel_set_pwm_level(conn_state, pwm_level);
+ }
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+ u8 old_ctrl, ctrl;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ return;
+ }
+
+ ctrl = old_ctrl;
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+ ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ }
+
+ if (ctrl != old_ctrl)
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0)
+ drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ /* Nothing to do for AUX based backlight controls */
+ if (panel->backlight.edp.intel.sdr_uses_aux)
+ return;
+
+ /* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+ panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+ } else {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.level != 0;
+ return 0;
+}
+
+/* VESA backlight callbacks */
-static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
@@ -52,7 +295,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
}
}
-static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -75,7 +318,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -86,7 +329,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* If we're not in DPCD control mode yet, the programmed brightness
* value is meaningless and we should assume max brightness
*/
- if (!intel_dp_aux_backlight_dpcd_mode(connector))
+ if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector))
return connector->panel.backlight.max;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -107,7 +350,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
-intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state,
+ u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -137,11 +381,11 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
-static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count;
int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
freq = dev_priv->vbt.backlight.pwm_freq_hz;
@@ -173,14 +417,16 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
return true;
}
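As a worked example of the formula quoted above (values hypothetical; the 27 MHz base is DP_EDP_BACKLIGHT_FREQ_BASE_KHZ from the eDP spec): for a VBT PWM frequency of 200 Hz and Pn = 10, so P = 2^10 = 1024, the divider F must satisfy 27 MHz / (F * P) = 200 Hz:

static u32 sketch_pwm_freq_divider_example(void)
{
    const u32 base_hz = 27000 * 1000; /* DP_EDP_BACKLIGHT_FREQ_BASE_KHZ */
    const u32 freq_hz = 200;          /* hypothetical vbt.backlight.pwm_freq_hz */
    const u32 p = 1 << 10;            /* pwmgen_bit_count (Pn) of 10 */

    return DIV_ROUND_CLOSEST(base_hz, freq_hz * p); /* ~= 132 */
}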
-static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -201,7 +447,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
- panel->backlight.pwmgen_bit_count) < 0)
+ pwmgen_bit_count) < 0)
drm_dbg_kms(&i915->drm,
"Failed to write aux pwmgen bit count\n");
@@ -214,7 +460,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
- if (intel_dp_aux_set_pwm_freq(connector))
+ if (intel_dp_aux_vesa_set_pwm_freq(connector))
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
if (new_dpcd_buf != dpcd_buf) {
@@ -225,18 +471,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
- intel_dp_aux_set_backlight(conn_state,
- connector->panel.backlight.level);
- set_aux_backlight_enable(intel_dp, true);
+ intel_dp_aux_vesa_set_backlight(conn_state, level);
+ set_vesa_backlight_enable(intel_dp, true);
}
-static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+ u32 level)
{
- set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
- false);
+ set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
-static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -309,40 +555,45 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
"Failed to write aux pwmgen bit count\n");
return max_backlight;
}
- panel->backlight.pwmgen_bit_count = pn;
+ panel->backlight.edp.vesa.pwmgen_bit_count = pn;
max_backlight = (1 << pn) - 1;
return max_backlight;
}
-static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector);
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = 0;
- panel->backlight.level = intel_dp_aux_get_backlight(connector);
- panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+ panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe);
+ panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) &&
panel->backlight.level != 0;
return 0;
}
static bool
-intel_dp_aux_display_control_capable(struct intel_connector *connector)
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
- * the panel can support backlight control over the aux channel
+ * the panel can support backlight control over the aux channel.
+ *
+ * TODO: We currently only support AUX-only backlight configurations, not
+ * backlights which require a mix of PWM and AUX controls to work. In the
+ * meantime, these machines typically work just fine using normal PWM
+ * controls anyway.
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
@@ -350,40 +601,89 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
return false;
}
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+ .setup = intel_dp_aux_hdr_setup_backlight,
+ .enable = intel_dp_aux_hdr_enable_backlight,
+ .disable = intel_dp_aux_hdr_disable_backlight,
+ .set = intel_dp_aux_hdr_set_backlight,
+ .get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+ .setup = intel_dp_aux_vesa_setup_backlight,
+ .enable = intel_dp_aux_vesa_enable_backlight,
+ .disable = intel_dp_aux_vesa_disable_backlight,
+ .set = intel_dp_aux_vesa_set_backlight,
+ .get = intel_dp_aux_vesa_get_backlight,
+};
+
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
- struct intel_panel *panel = &intel_connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool try_intel_interface = false, try_vesa_interface = false;
- if (i915->params.enable_dpcd_backlight == 0 ||
- !intel_dp_aux_display_control_capable(intel_connector))
+ /* Check the VBT and user's module parameters to figure out which
+ * interfaces to probe
+ */
+ switch (i915->params.enable_dpcd_backlight) {
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+ switch (i915->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+ case INTEL_BACKLIGHT_DISPLAY_DDI:
+ try_intel_interface = true;
+ try_vesa_interface = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+ try_intel_interface = true;
+ break;
+ }
/*
- * There are a lot of machines that don't advertise the backlight
- * control interface to use properly in their VBIOS, :\
+ * A lot of eDP panels in the wild will report supporting both the
+ * Intel proprietary backlight control interface and the VESA
+ * backlight control interface. Many of these panels are liars though,
+ * and will only work with the Intel interface. So always probe for
+ * that first.
*/
- if (i915->vbt.backlight.type !=
- INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915->params.enable_dpcd_backlight != 1 &&
- !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
- DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- drm_info(&i915->drm,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
- return -ENODEV;
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+ return 0;
}
- panel->backlight.setup = intel_dp_aux_setup_backlight;
- panel->backlight.enable = intel_dp_aux_enable_backlight;
- panel->backlight.disable = intel_dp_aux_disable_backlight;
- panel->backlight.set = intel_dp_aux_set_backlight;
- panel->backlight.get = intel_dp_aux_get_backlight;
+ if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
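The rework above collapses five separate backlight function pointers into a single const ops table chosen once at probe time, so adding the Intel HDR interface needed no changes at the call sites. A minimal userspace sketch of the same dispatch pattern (names and signatures here are illustrative stand-ins, not the driver's):

	#include <stdio.h>

	/* hypothetical, simplified ops table; the real one takes
	 * connector/state arguments and has setup/disable/get too */
	struct bl_funcs {
		void (*enable)(int level);
		void (*set)(int level);
	};

	static void vesa_enable(int level) { printf("VESA enable at %d\n", level); }
	static void vesa_set(int level) { printf("VESA set %d\n", level); }

	static const struct bl_funcs vesa_bl_funcs = {
		.enable = vesa_enable,
		.set = vesa_set,
	};

	int main(void)
	{
		/* probe picks one table; callers always dispatch through it */
		const struct bl_funcs *funcs = &vesa_bl_funcs;

		funcs->enable(100);
		funcs->set(50);
		return 0;
	}

As a practical aside, the new modparam values mean i915.enable_dpcd_backlight=2 forces the VESA interface and =3 forces the Intel one, which is useful on panels that misreport their capabilities.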
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 03424d20e9f7..4dba5bb15af5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -16,6 +16,30 @@
#include "intel_dp.h"
#include "intel_hdcp.h"
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+ u32 stream_enc_mask;
+
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+ break;
+ case TRANSCODER_B:
+ stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+ break;
+ case TRANSCODER_C:
+ stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+ break;
+ case TRANSCODER_D:
+ stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+ break;
+ default:
+ stream_enc_mask = 0;
+ }
+
+ return stream_enc_mask;
+}
+
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
long ret;
@@ -561,7 +585,8 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status;
int ret;
@@ -622,48 +647,143 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
};
static int
-intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
- enum transcoder cpu_transcoder,
- bool enable)
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!enable)
- usleep_range(6, 60); /* Bspec says >= 6us */
-
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
- cpu_transcoder, enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ hdcp->stream_transcoder, enable,
+ TRANS_DDI_HDCP_SELECT);
if (ret)
- drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
-static
-bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ u32 stream_enc_status;
+ int ret;
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+ if (!stream_enc_status)
+ return -EINVAL;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status,
+ enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
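intel_de_wait_for_register() polls until the masked status bits reach the expected value or the timeout expires, and the callers here map a timeout to -ETIMEDOUT. A rough, self-contained model of that poll loop (the simulated register read and iteration-count timeout are stand-ins for real MMIO reads and sleeps):

	#include <stdint.h>
	#include <stdio.h>

	/* simulated register: the status bit comes up after a few reads */
	static uint32_t read_reg(void)
	{
		static int reads;
		return ++reads >= 3 ? 0x1 : 0x0;
	}

	/* poll until (reg & mask) == value; 0 on success, -1 on timeout */
	static int wait_for_register(uint32_t mask, uint32_t value,
				     unsigned int tries)
	{
		while (tries--) {
			if ((read_reg() & mask) == value)
				return 0;
			/* the driver sleeps between reads instead of spinning */
		}
		return -1;
	}

	int main(void)
	{
		printf("wait: %d\n", wait_for_register(0x1, 0x1, 10));
		return 0;
	}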
+static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *intel_dp = &dig_port->dp;
struct drm_dp_query_stream_enc_status_ack_reply reply;
+ struct intel_dp *intel_dp = &dig_port->dp;
int ret;
- if (!intel_dp_hdcp_check_link(dig_port, connector))
- return false;
-
ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
connector->port, &reply);
if (ret) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
- connector->base.base.id, connector->base.name, ret);
+ "[%s:%d] failed QSES ret=%d\n",
+ connector->base.name, connector->base.base.id, ret);
return false;
}
+ drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
+ connector->base.name, connector->base.base.id,
+ reply.auth_completed, reply.encryption_enabled);
+
return reply.auth_completed && reply.encryption_enabled;
}
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ enum pipe pipe = (enum pipe)cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ drm_WARN_ON(&i915->drm, enable &&
+ !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Per DP v2.0 I.3.3, the stream signature L' in the QSES reply msg is
+ * ignored. Per I.3.5, an MST source device may use a QSES msg to query
+ * downstream status for a particular stream.
+ */
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ /*
+ * The Link Check only needs to be done for the connector involved with
+ * HDCP port authentication and encryption.
+ * We can re-use the hdcp->is_repeater flag to identify the connector
+ * involved with HDCP port authentication and encryption.
+ */
+ if (hdcp->is_repeater) {
+ ret = intel_dp_hdcp2_check_link(dig_port, connector);
+ if (ret)
+ return ret;
+ }
+
+ return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+}
+
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -673,10 +793,16 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
- .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
- .check_link = intel_dp_mst_hdcp_check_link,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+ .check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
-
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+ .check_2_2_link = intel_dp_mst_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
.protocol = HDCP_PROTOCOL_DP,
};
@@ -693,10 +819,10 @@ int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
return 0;
if (intel_connector->mst_port)
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_hdcp_shim);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index d8c6d7054d11..892d7db7d94f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,18 +34,6 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
-{
- int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
-
- /*
- * Pretend no LTTPRs in case of LTTPR detection error, or
- * if too many (>8) LTTPRs are detected. This translates to link
- * training in transparent mode.
- */
- return count <= 0 ? 0 : count;
-}
-
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -142,6 +130,17 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return 0;
ret = intel_dp_read_lttpr_common_caps(intel_dp);
+ if (!ret)
+ return 0;
+
+ lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ /*
+ * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
+ * detected, as this breaks link training at least on the Dell WD19TB
+ * dock.
+ */
+ if (lttpr_count == 0)
+ return 0;
/*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
@@ -150,17 +149,12 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
*/
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
- if (!ret)
- return 0;
-
- lttpr_count = intel_dp_lttpr_count(intel_dp);
-
/*
* In case of unsupported number of LTTPRs or failing to switch to
* non-transparent mode fall-back to transparent link training mode,
* still taking into account any LTTPR common lane- rate/count limits.
*/
- if (lttpr_count == 0)
+ if (lttpr_count < 0)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
@@ -222,11 +216,11 @@ intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int lttpr_count = intel_dp_lttpr_count(intel_dp);
+ int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
- return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+ return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
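The LTTPR init reordering boils down to three cases on the detected repeater count, which the hunks above encode with early returns. A compact illustrative helper (not the driver's API) summarizing the policy:

	#include <stdio.h>

	enum lttpr_mode { LTTPR_TRANSPARENT, LTTPR_NON_TRANSPARENT };

	/*
	 * count == 0: no LTTPRs detected; leave the mode register untouched,
	 *             since writing it breaks link training on some docks
	 *             (e.g. the Dell WD19TB).
	 * count <  0: detection error or too many LTTPRs; explicitly program
	 *             transparent mode and train that way.
	 * count >  0: switch to non-transparent mode and train each PHY.
	 */
	static enum lttpr_mode pick_lttpr_mode(int count, int *write_mode_reg)
	{
		*write_mode_reg = (count != 0);
		return count > 0 ? LTTPR_NON_TRANSPARENT : LTTPR_TRANSPARENT;
	}

	int main(void)
	{
		for (int count = -1; count <= 1; count++) {
			int write;
			enum lttpr_mode mode = pick_lttpr_mode(count, &write);

			printf("count=%d -> %s, write mode reg: %d\n", count,
			       mode == LTTPR_NON_TRANSPARENT ?
			       "non-transparent" : "transparent", write);
		}
		return 0;
	}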
@@ -434,7 +428,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
- link_config[0] = 0;
+ link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
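Setting DP_MSA_TIMING_PAR_IGNORE_EN in DOWNSPREAD_CTRL when VRR is enabled tells the sink to ignore the MSA timing parameters; with a variable refresh rate the effective vtotal changes from frame to frame, so the sink has to derive timing from the video stream itself rather than from static MSA values.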
@@ -697,9 +691,9 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
- * Stop the link training of the @intel_dp port, disabling the test pattern
- * symbol generation on the port and disabling the training pattern in
- * the sink's DPCD.
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
*
* What symbols are output on the port after this point is
* platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
@@ -713,10 +707,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
intel_dp->link_trained = true;
- intel_dp_program_link_training_pattern(intel_dp,
- crtc_state,
- DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_TRAINING_PATTERN_DISABLE);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 3286b232be0b..b4621ed0127e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,8 +53,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
@@ -571,7 +570,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- pipe_config->cpu_transcoder,
+ pipe_config,
(u8)conn_state->hdcp_content_type);
}
@@ -831,12 +830,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-
- /* TODO: Figure out how to make HDCP work on GEN12+ */
- if (INTEL_GEN(dev_priv) < 12) {
+ if (INTEL_GEN(dev_priv) <= 12) {
ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 000000000000..7ba7f315aaee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+struct intel_limit {
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
+};
+static const struct intel_limit intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's N counter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100mhz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+ /* FIXME: find real dot limits */
+ .dot = { .min = 0, .max = INT_MAX },
+ .vco = { .min = 4800000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
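As a concrete check of the divider math, here is a small standalone program evaluating the i9xx-style equation dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / (p1*p2). The divider values are made up for illustration and are not from any platform limit table; the kernel also rounds to closest where this sketch truncates:

	#include <stdio.h>

	struct dpll { int n, m1, m2, p1, p2, m, p, vco, dot; };

	static int i9xx_calc(int refclk, struct dpll *c)
	{
		c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
		c->p = c->p1 * c->p2;
		c->vco = refclk * c->m / (c->n + 2);
		c->dot = c->vco / c->p;
		return c->dot;
	}

	int main(void)
	{
		struct dpll c = { .n = 3, .m1 = 14, .m2 = 6, .p1 = 2, .p2 = 5 };

		i9xx_calc(96000, &c);
		/* prints: m=88 p=10 vco=1689600 kHz dot=168960 kHz */
		printf("m=%d p=%d vco=%d kHz dot=%d kHz\n",
		       c.m, c.p, c.vco, c.dot);
		return 0;
	}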
+/*
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+ const struct intel_limit *limit,
+ const struct dpll *clock)
+{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
+
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+ if (clock->m1 <= clock->m2)
+ return false;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_GEN9_LP(dev_priv)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ return false;
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ return false;
+ }
+
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ return false;
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ return false;
+
+ return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev_priv))
+ return limit->p2.p2_fast;
+ else
+ return limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ return limit->p2.p2_slow;
+ else
+ return limit->p2.p2_fast;
+ }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pnv_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int max_n;
+ bool found = false;
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n over precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
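The error budget above is plain shift arithmetic: (target >> 8) + (target >> 9) = target/256 + target/512 = 3*target/512, roughly 0.586% of the requested dot clock, which is where the 0.00585 in the comment comes from.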
+/*
+ * Check whether the calculated PLL configuration is better than the best
+ * configuration found so far. Returns true if it is, and stores the
+ * calculated error in *error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(to_i915(dev))) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
+
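As a worked example of the ppm math: target = 200000 kHz with calculated_clock->dot = 200015 kHz gives 1000000 * 15 / 200000 = 75 ppm. Since that is under the 100 ppm threshold, a candidate with a larger P wins here and locks the error to 0, even if a later candidate would have a smaller raw error.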
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct dpll clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* based on hardware requirement, prefer smaller n over precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
+
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
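Unlike the i9xx-style searches, this loop never iterates over m2: given n, p and m1 it solves for m2 in closed form with DIV_ROUND_CLOSEST and then only validates the result, which collapses one loop dimension and keeps the search cheap.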
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
+ struct dpll clock;
+ u64 m2;
+ bool found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
+
+ /*
+ * Based on the hardware doc, n is always set to 1 and m1 is always
+ * set to 2. If we need to support a 200 MHz refclk, this needs to be
+ * revisited because n may no longer be 1.
+ */
+ clock.n = 1;
+ clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
+ }
+ }
+
+ return found;
+}
+
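The CHV PLL carries m2 as a fixed-point value with 22 fractional bits, which is why the m2 limits in intel_limits_chv above are shifted by 22. A standalone arithmetic check at one made-up operating point (100 MHz refclk, 270 MHz port clock; not from any platform table):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int refclk = 100000, port_clock = 270000;
		const int n = 1, m1 = 2, p = 4;      /* p = p1 * p2 */
		const int target = port_clock * 5;   /* fast clock, kHz */

		/* m2 with 22 fractional bits, as in chv_find_best_dpll() */
		uint64_t m2 = ((uint64_t)target * p * n << 22) /
			      ((uint64_t)refclk * m1);
		uint64_t vco = ((uint64_t)refclk * m1 * m2) >> 22;

		/* prints: m2 = 113246208 (27.000), vco = 5400000 kHz, 270000 kHz */
		printf("m2 = %llu (%.3f), vco = %llu kHz, %llu kHz\n",
		       (unsigned long long)m2, m2 / (double)(1 << 22),
		       (unsigned long long)vco,
		       (unsigned long long)(vco / p / 5));
		return 0;
	}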
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_bxt;
+
+ return chv_find_best_dpll(limit, crtc_state,
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
+}
+
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
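The 1 << dpll->n in pnv_dpll_compute_fp() is deliberate: Pineview's N divider is a ring counter (see the limit-table comment above), so the FP register presumably takes the N value one-hot rather than as a plain binary field, unlike the other platforms.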
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev_priv)) {
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
+ crtc_state->dpll_hw_state.fp1 = fp2;
+ } else {
+ crtc_state->dpll_hw_state.fp1 = fp;
+ }
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev_priv))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev_priv) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_GEN(dev_priv) >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc_state->sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ /*
+ * Bspec:
+ * "[Almador Errata}: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to “1” in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity, we keep both bits always enabled in
+ * both DPLLs. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ INTEL_GEN(dev_priv) >= 11) {
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll, fp, fp2;
+ int factor;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
+ factor = 25;
+ } else if (crtc_state->sdvo_tv_clock) {
+ factor = 20;
+ }
+
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
+
+ dpll = 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except use up a
+ * bit of power potentially.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+ * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (crtc_state->dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_limit *limit;
+ int refclk = 120000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv)) {
+ if (refclk == 100000)
+ limit = &ilk_limits_dual_lvds_100m;
+ else
+ limit = &ilk_limits_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &ilk_limits_single_lvds_100m;
+ else
+ limit = &ilk_limits_single_lvds;
+ }
+ } else {
+ limit = &ilk_limits_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ ilk_compute_dpll(crtc, crtc_state, NULL);
+
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* Use the generic i9xx limits for any other output types */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &pnv_limits_lvds;
+ } else {
+ limit = &pnv_limits_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
+ } else {
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 48000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ else if (!IS_GEN(dev_priv, 2))
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ else
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+}
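One ordering subtlety in this cascade: HSW/BDW have both DDI and a PCH split, so the gen9/HAS_DDI check has to come before the HAS_PCH_SPLIT check to keep those platforms on hsw_crtc_compute_clock. The remaining branches are mutually exclusive platform matches, so their order is less delicate.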
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 000000000000..caf4615092e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+struct dpll;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index b53c50372918..584c14c4cbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -43,7 +43,7 @@
#define PANEL_PWM_MAX_VALUE 0xFF
-static u32 dcs_get_backlight(struct intel_connector *connector)
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
}
}
-static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
@@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
}
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&cabc, sizeof(cabc));
}
- dcs_set_backlight(conn_state, panel->backlight.level);
+ dcs_set_backlight(conn_state, level);
}
static int dcs_setup_backlight(struct intel_connector *connector,
@@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector,
return 0;
}
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ .setup = dcs_setup_backlight,
+ .enable = dcs_enable_backlight,
+ .disable = dcs_disable_backlight,
+ .set = dcs_set_backlight,
+ .get = dcs_get_backlight,
+};
+
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
@@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
- panel->backlight.setup = dcs_setup_backlight;
- panel->backlight.enable = dcs_enable_backlight;
- panel->backlight.disable = dcs_disable_backlight;
- panel->backlight.set = dcs_set_backlight;
- panel->backlight.get = dcs_get_backlight;
+ panel->backlight.funcs = &dcs_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 237dbb1ba0ee..090cd76266c6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- /*I915_WRITE(DVOB_SRCDIM,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
intel_de_write(dev_priv, dvo_srcdim_reg,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- /*I915_WRITE(DVOB, dvo_val);*/
intel_de_write(dev_priv, dvo_reg, dvo_val);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a5b072816a7b..5fd4fa4805ef 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -676,7 +676,7 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static bool tiling_is_valid(struct drm_i915_private *dev_priv,
- uint64_t modifier)
+ u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fence_id = plane_state->vma->fence->id;
else
cache->fence_id = -1;
+
+ cache->psr2_active = crtc_state->has_psr2;
}
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
@@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Tigerlake does not support FBC with PSR2.
+ * The recommendation is to keep this combination disabled.
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+ fbc->no_fbc_reason = "not supported with PSR2";
+ return false;
+ }
+
return true;
}
@@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /*
- * Fbc is causing random underruns in CI execution on TGL platforms.
- * Disabling the same while the problem is being debugged and analyzed.
- */
- if (IS_TIGERLAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 842c04e63214..84f853f113b9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (vma->obj->stolen && !prealloc)
+ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -595,7 +595,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING &&
- intel_fb_obj(&ifbdev->fb->base)->stolen)
+ !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 000000000000..b2eb96ae10a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+/* number of FDI lanes required by this pipe, or 0 when FDI is unused */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
+
+ return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
+ return 0;
+
+ /* Ivybridge 3 pipe is really complicated */
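+ /*
+ * Pipes B and C share the FDI bandwidth towards the PCH: pipe B may
+ * use more than 2 lanes only while pipe C is unused, and pipe C is
+ * capped at 2 lanes and further requires pipe B to use no more than
+ * 2 (see the checks below).
+ */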
+ switch (pipe) {
+ case PIPE_A:
+ return 0;
+ case PIPE_B:
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+ return 0;
+ case PIPE_C:
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ BUG();
+ }
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
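+ /*
+ * Illustrative example (numbers are not from Bspec): a 148500 kHz
+ * dotclock at 24 bpp needs ~3.6 Gbit/s; with each FDI lane carrying
+ * roughly 8 data bits per 270 MHz symbol clock, this rounds up to
+ * 2 lanes.
+ */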
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n, false, false);
+
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return I915_DISPLAY_CONFIG_RETRY;
+
+ return ret;
+}
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (IS_IVYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, tries;
+
+ /* FDI needs bits from pipe first */
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+
+}
+
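+/*
+ * FDI TX vswing/pre-emphasis settings, tried in order by the training
+ * loops below until the link achieves bit/symbol lock.
+ */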
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, retry;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
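+ /* step through the four vswing/pre-emphasis levels until bit lock */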
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN(dev_priv, 6)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, j;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
+ }
+
+train_done:
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
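+ /* BPC in FDI rx is consistent with that in PIPECONF */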
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+ }
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(0x7 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev_priv))
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
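+ /*
+ * CPU FDI link training is only needed on ILK/SNB/IVB; on HSW/BDW
+ * the (CRT-only) FDI link is trained from the DDI code instead.
+ */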
+ if (IS_GEN(dev_priv, 5)) {
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ } else if (IS_GEN(dev_priv, 6)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 000000000000..a9cd21663eb8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+#define I915_DISPLAY_CONFIG_RETRY 1
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b370d7a4..7b38eee9980f 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
struct i915_vma *vma;
spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj)
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b9d8825e2bb1..ae1371c36a32 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -15,6 +15,7 @@
#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
@@ -23,9 +24,78 @@
#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
#define HDCP2_LC_RETRY_CNT 3
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+ /* For HDMI this is forced to be 0x0. For DP SST this is also 0x0. */
+ return connector->port ? connector->port->vcpi.vcpi : 0;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type possible for all streams in a DP MST topology. The
+ * security f/w has no provision to mark a content_type for each stream
+ * separately; it marks all available streams with the content_type
+ * provided at the time of port authentication. This may prevent
+ * userspace from using type1 content on an HDCP 2.2 capable sink when
+ * other sinks in the DP MST topology are not HDCP 2.2 capable. Though
+ * it is not compulsory, the security fw should change its policy to
+ * mark different content_types for different streams.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ bool enforce_type0 = false;
+ int k;
+
+ data->k = 0;
+
+ if (dig_port->hdcp_auth_status)
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ if (!enforce_type0 && !intel_hdcp2_capable(connector))
+ enforce_type0 = true;
+
+ data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
+ /*
+ * Apply a common protection level across all streams in the DP MST
+ * topology: the highest content type that every stream can support.
+ */
+ for (k = 0; k < data->k; k++)
+ data->streams[k].stream_type =
+ enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
+}
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -762,15 +832,22 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
- /*
- * XXX: If we have MST-connected devices, we need to enable encryption
- * on those as well.
- */
+ /* DP MST Auth Part 1 Step 2.a and Step 2.b */
+ if (shim->stream_encryption) {
+ ret = shim->stream_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
@@ -792,24 +869,29 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
- /*
- * If there are other connectors on this port using HDCP, don't disable
- * it. Instead, toggle the HDCP signalling off on that particular
- * connector/pipe and exit.
- */
- if (dig_port->num_hdcp_streams > 0) {
- ret = hdcp->shim->toggle_signalling(dig_port,
- cpu_transcoder, false);
- if (ret)
- DRM_ERROR("Failed to disable HDCP signalling\n");
- return ret;
+ if (hdcp->shim->stream_encryption) {
+ ret = hdcp->shim->stream_encryption(connector, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+ /*
+ * If there are other connectors on this port using HDCP,
+ * don't disable it until it disabled HDCP encryption for
+ * all connectors in MST topology.
+ */
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
}
hdcp->hdcp_encrypted = false;
intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
- ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1014,7 +1096,8 @@ static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
struct hdcp2_ake_init *ake_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1043,7 +1126,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct hdcp2_ake_no_stored_km *ek_pub_km,
size_t *msg_sz)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1070,7 +1154,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
static int hdcp2_verify_hprime(struct intel_connector *connector,
struct hdcp2_ake_send_hprime *rx_hprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1095,7 +1180,8 @@ static int
hdcp2_store_pairing_info(struct intel_connector *connector,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1121,7 +1207,8 @@ static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
struct hdcp2_lc_init *lc_init)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1147,7 +1234,8 @@ static int
hdcp2_verify_lprime(struct intel_connector *connector,
struct hdcp2_lc_send_lprime *rx_lprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1172,7 +1260,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
static int hdcp2_prepare_skey(struct intel_connector *connector,
struct hdcp2_ske_send_eks *ske_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1200,7 +1289,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
*rep_topology,
struct hdcp2_rep_send_ack *rep_send_ack)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1228,7 +1318,8 @@ static int
hdcp2_verify_mprime(struct intel_connector *connector,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1251,7 +1342,8 @@ hdcp2_verify_mprime(struct intel_connector *connector,
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1275,6 +1367,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
static int hdcp2_close_mei_session(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1288,7 +1381,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
- &connector->hdcp.port_data);
+ &dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1448,13 +1541,14 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
struct hdcp2_rep_stream_ready stream_ready;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
- int ret;
+ int ret, streams_size_delta, i;
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
return -ERANGE;
@@ -1463,16 +1557,18 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
- /* K no of streams is fixed as 1. Stored as big-endian. */
- msgs.stream_manage.k = cpu_to_be16(1);
+ msgs.stream_manage.k = cpu_to_be16(data->k);
- /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- msgs.stream_manage.streams[0].stream_id = 0;
- msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+ for (i = 0; i < data->k; i++) {
+ msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+ msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+ }
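+ /* only data->k streams are valid; trim the unused tail of streams[] */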
+ streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+ sizeof(struct hdcp2_streamid_type);
/* Send it to Repeater */
ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
- sizeof(msgs.stream_manage));
+ sizeof(msgs.stream_manage) - streams_size_delta);
if (ret < 0)
goto out;
@@ -1481,8 +1577,8 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
if (ret < 0)
goto out;
- hdcp->port_data.seq_num_m = hdcp->seq_num_m;
- hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ data->seq_num_m = hdcp->seq_num_m;
+
ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
out:
@@ -1606,6 +1702,36 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
return ret;
}
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret = 0;
+
+ if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS)) {
+ drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+ connector->base.name, connector->base.base.id);
+ return -EPERM;
+ }
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ return ret;
+}
+
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1641,7 +1767,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ dig_port->hdcp_auth_status = true;
return ret;
}
@@ -1665,7 +1792,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
@@ -1714,11 +1841,11 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- int ret, i, tries = 3;
+ int ret = 0, i, tries = 3;
- for (i = 0; i < tries; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = hdcp2_propagate_stream_management_info(connector);
@@ -1728,8 +1855,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
ret);
break;
}
- hdcp->port_data.streams[0].stream_type =
- hdcp->content_type;
+
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
@@ -1744,7 +1870,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
- if (!ret) {
+ if (!ret && !dig_port->hdcp_auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -1759,12 +1885,16 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
}
}
+ ret = hdcp2_enable_stream_encryption(connector);
+
return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1772,6 +1902,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
+ /* Stream which requires encryption */
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
@@ -1789,18 +1929,37 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
connector->base.name, connector->base.base.id);
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, false);
+ if (ret) {
+ drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
+ }
+
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
return ret;
}
@@ -1816,6 +1975,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -1837,7 +1997,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port, connector);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
intel_hdcp_update_value(connector,
@@ -1893,6 +2053,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
}
out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1968,12 +2129,13 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
static int initialize_hdcp_port_data(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
- struct hdcp_port_data *data = &hdcp->port_data;
+ enum port port = dig_port->base.port;
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
@@ -1994,16 +2156,15 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
- data->k = 1;
if (!data->streams)
- data->streams = kcalloc(data->k,
+ data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
-
+ /* For SST */
data->streams[0].stream_id = 0;
data->streams[0].stream_type = hdcp->content_type;
@@ -2046,14 +2207,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
+static void intel_hdcp2_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector, port, shim);
+ ret = initialize_hdcp_port_data(connector, dig_port, shim);
if (ret) {
drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
@@ -2063,7 +2225,7 @@ static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
}
int intel_hdcp_init(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2073,15 +2235,15 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
- intel_hdcp2_init(connector, port, shim);
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, dig_port, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(hdcp->port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
return ret;
}
@@ -2095,7 +2257,7 @@ int intel_hdcp_init(struct intel_connector *connector,
}
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type)
+ const struct intel_crtc_state *pipe_config, u8 content_type)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2106,15 +2268,28 @@ int intel_hdcp_enable(struct intel_connector *connector,
if (!hdcp->shim)
return -ENOENT;
+ if (!connector->encoder) {
+ drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+ connector->base.name, connector->base.base.id);
+ return -ENODEV;
+ }
+
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&dev_priv->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
- hdcp->cpu_transcoder = cpu_transcoder;
+
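+ /*
+ * For MST, the port-level HDCP state is driven through the MST
+ * master transcoder, while stream_transcoder identifies this
+ * connector's own stream.
+ */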
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+ hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+ hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+ hdcp->stream_transcoder = INVALID_TRANSCODER;
+ }
if (INTEL_GEN(dev_priv) >= 12)
- hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2234,7 +2409,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -2284,7 +2459,6 @@ void intel_hdcp_cleanup(struct intel_connector *connector)
drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
mutex_lock(&hdcp->mutex);
- kfree(hdcp->port_data.streams);
hdcp->shim = NULL;
mutex_unlock(&hdcp->mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1bbf5b67ed0a..8f53b0c7fe5c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
@@ -16,16 +18,18 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
struct intel_hdcp_shim;
+struct intel_digital_port;
enum port;
enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector, enum port port,
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type);
+ const struct intel_crtc_state *pipe_config, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 82674a8853c6..d5f4b40a8460 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *frame, ssize_t len)
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ctl_reg);
}
-static void hsw_read_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- void *frame, ssize_t len)
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type, void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1495,15 +1494,16 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- false);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- true);
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1526,8 +1526,9 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ cpu_transcoder, enable,
+ TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1732,7 +1733,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
@@ -2950,21 +2952,12 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *dig_port =
- hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
- /*
- * Attach Colorspace property for Non LSPCON based device
- * ToDo: This needs to be extended for LSPCON implementation
- * as well. Will be implemented separately.
- */
- if (!dig_port->lspcon.active)
- intel_attach_colorspace_property(connector);
-
+ intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -3300,7 +3293,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_hdmi->attached_connector = intel_connector;
if (is_hdcp_supported(dev_priv, port)) {
- int ret = intel_hdcp_init(intel_connector, port,
+ int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
drm_dbg_kms(&dev_priv->drm,
@@ -3438,3 +3431,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(dig_port, intel_connector);
}
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * Slice Height determination : HDMI2.1 Section 7.7.5.2
+ * Select the smallest slice height >= 96 that results in a valid PPS
+ * and requires the minimum number of padding lines for the final slice.
+ *
+ * Assumption : Vactive is even.
+ */
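+ /*
+ * For example (illustrative): vactive = 1080 yields slice_height =
+ * 108, the smallest even value >= 96 that divides 1080 evenly.
+ */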
+ for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ return 0;
+}
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get the number of DSC slices based on the
+ * DSC encoder and DSC decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by the DSC encoder
+ * @hdmi_max_slices: maximum slices supported by the sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by the HDMI sink
+ *
+ * @return: number of DSC slices that can be supported by both the DSC
+ * encoder and the decoder, or 0 if no valid configuration exists.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE 2720000
+/*
+ * Rates at which the source and sink are required to process pixels in each
+ * slice: one of two levels, either at least 340000 KHz or at least 400000 KHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH 2720
+ int kslice_adjust;
+ int adjusted_clk_khz;
+ int min_slices;
+ int target_slices;
+ int max_throughput; /* max clock freq. in khz per slice */
+ int max_slice_width;
+ int slice_width;
+ int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ if (!hdmi_throughput)
+ return 0;
+
+ /*
+ * Slice width determination: HDMI2.1 section 7.7.5.1
+ * The kslice_adjust factor is 0.5 for the 4:2:0 and 4:2:2 formats,
+ * whereas for 4:4:4 it is 1.0. Multiply these factors by 10 here and
+ * divide the adjusted clock value by 10 later.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ kslice_adjust = 10;
+ else
+ kslice_adjust = 5;
+
+ /*
+ * As per the spec, the rate at which the source and the sink process
+ * the pixels per slice is at one of two levels: at least 340 MHz or
+ * 400 MHz. Which one applies depends on the pixel clock rate and the
+ * output format (kslice adjust).
+ * If pixel clock * kslice adjust <= 2720 MHz, slices can be processed
+ * at up to 340 MHz, otherwise they can be processed at up to 400 MHz.
+ */
+
+ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+ if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+ else
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+ /*
+ * Taking into account the sink's capability for maximum
+ * clock per slice (in MHz) as read from HF-VSDB.
+ */
+ max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+ min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+ max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
+
+ /*
+ * Keep increasing the number of slices per line, starting from
+ * min_slices, until the resulting slice_width drops below
+ * max_slice_width. The slices/line selected must also be less than or
+ * equal to the maximum number of horizontal slices that the
+ * combination of the PCON encoder and the HDMI decoder can support.
+ */
+ slice_width = max_slice_width;
+
+ do {
+ if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+ target_slices = 1;
+ else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+ target_slices = 2;
+ else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+ target_slices = 4;
+ else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+ target_slices = 8;
+ else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+ target_slices = 12;
+ else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+ target_slices = 16;
+ else
+ return 0;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+ if (slice_width >= max_slice_width)
+ min_slices = target_slices + 1;
+ } while (slice_width >= max_slice_width);
+
+ return target_slices;
+}
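Worked numbers may help here (illustrative only): for a 3840x2160 RGB mode at a
1188000 kHz pixel clock (4k@120), kslice_adjust is 10, so adjusted_clk_khz =
1188000 <= 2720000 and max_throughput starts at 340000 kHz. With a sink
advertising 400 MHz per slice, max_throughput stays at 340000, giving
min_slices = DIV_ROUND_UP(1188000, 340000) = 4:

    /* Hypothetical capabilities: encoder 8 slices / 5120 px slice width,
     * sink 8 slices / 400 MHz. target_slices resolves to 4, and
     * slice_width = DIV_ROUND_UP(3840, 4) = 960 < 2720, so 4 is returned.
     */
    int n = intel_hdmi_dsc_get_num_slices(crtc_state, 8, 5120, 8, 400);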
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel
+ * based on source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: DSC slice width supported by the source and sink
+ * @num_slices: number of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding in 1/16th bpp steps
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by the sink
+ *
+ * @return: compressed bits_per_pixel in steps of 1/16 of bits_per_pixel
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+ int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes)
+{
+ int max_dsc_bpp, min_dsc_bpp;
+ int target_bytes;
+ bool bpp_found = false;
+ int bpp_decrement_x16;
+ int bpp_target;
+ int bpp_target_x16;
+
+ /*
+ * Get the min and max bpp as per Table 7.23 in the HDMI2.1 spec.
+ * Start with the max bpp and keep decrementing by the fractional bpp
+ * step, if supported by the PCON DSC encoder.
+ *
+ * For each bpp, check whether the resulting number of chunk bytes can
+ * be supported by the HDMI sink.
+ */
+
+ /* Assuming a bpc of 8 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ min_dsc_bpp = 6;
+ max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ min_dsc_bpp = 8;
+ max_dsc_bpp = 3 * 8; /* 3*bpc */
+ } else {
+ /* Assuming 4:2:2 encoding */
+ min_dsc_bpp = 7;
+ max_dsc_bpp = 2 * 8; /* 2*bpc */
+ }
+
+ /*
+ * Take into account whether the HDMI2.1 sink supports DSC_all_bpp.
+ * Section 7.7.34: the source shall not enable Compressed Video
+ * Transport with bpp_target settings above 12 bpp unless
+ * DSC_all_bpp is set to 1.
+ */
+ if (!hdmi_all_bpp)
+ max_dsc_bpp = min(max_dsc_bpp, 12);
+
+ /*
+ * The sink limits the amount of compressed data in bytes per scanline,
+ * as described by the max_chunk_bytes field in the HFVSDB block of the
+ * EDID. The number of bytes depends on the target bits per pixel that
+ * the source configures. So we start with the max bpp and calculate
+ * the target_chunk_bytes. We keep decrementing the target bpp until
+ * the target_chunk_bytes drops below the sink's max_chunk_bytes, or
+ * until we reach min_dsc_bpp.
+ *
+ * The decrement step follows the fractional bpp support of the PCON
+ * DSC encoder. For fractional bpp we track bpp_target in units of
+ * 1/16 bpp:
+ *
+ * bpp_target_x16 = bpp_target * 16
+ * So we decrement by {1, 2, 4, 8, 16} for the fractional bpp steps
+ * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+ */
+
+ bpp_target = max_dsc_bpp;
+
+ /*
+ * If the source has no fractional bpp support, step bpp_target_x16
+ * down by 16, i.e. in whole-bpp increments.
+ */
+ if (!src_fractional_bpp)
+ src_fractional_bpp = 1;
+ bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
+ bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+ while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+ int bpp;
+
+ bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+ target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+ if (target_bytes <= hdmi_max_chunk_bytes) {
+ bpp_found = true;
+ break;
+ }
+ bpp_target_x16 -= bpp_decrement_x16;
+ }
+ if (bpp_found)
+ return bpp_target_x16;
+
+ return 0;
+}
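Continuing the same illustrative example: RGB at 8 bpc gives min_dsc_bpp = 8
and max_dsc_bpp = 24, and with hdmi_all_bpp false the max clamps to 12. A
source without fractional bpp support decrements in whole-bpp steps
(bpp_decrement_x16 = 16), so the first candidate tested is 12 * 16 - 16 = 176,
i.e. 11 bpp:

    /* Hypothetical sink limit: with 4 slices of 960 px at 11 bpp,
     * target_bytes = DIV_ROUND_UP(4 * 960 * 11, 8) = 5280 <= 6144,
     * so the function returns 176 (11 bpp in 1/16 bpp units).
     */
    int bpp_x16 = intel_hdmi_dsc_get_bpp(0, 960, 4, INTEL_OUTPUT_FORMAT_RGB,
                                         false, 6144);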
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 15eb0ccde76e..fa1a9b030850 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+ int num_slices, int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e37d45e531df..e4ff533e3a69 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -30,11 +30,15 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_lspcon.h"
+#include "intel_hdmi.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ return DPCD_MCA_LSPCON_HDR_STATUS;
+ else
+ return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+ &hdr_caps, 1);
+
+ if (ret < 0) {
+ drm_dbg_kms(dev, "HDR capability detection failed\n");
+ lspcon->hdr_supported = false;
+ } else if (hdr_caps & 0x1) {
+ drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+}
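lspcon->hdr_supported is consulted later when reading back DIP state (see
lspcon_infoframes_enabled() below). A hypothetical probe-time usage sketch:

    /* Hypothetical: run once after the LSPCON is up in PCON mode */
    lspcon_detect_hdr_capability(lspcon);
    if (lspcon->hdr_supported)
            drm_dbg_kms(dev, "GMP (HDR metadata) packets may be enabled\n");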
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+ /* Reuse the legacy HSW DIP implementation for GMP packets */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+ break;
+ default:
return;
-
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
-
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
@@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- /* FIXME implement this */
+ /* FIXME implement for AVI Infoframe as well */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ hsw_read_infoframe(encoder, crtc_state, type,
+ frame, len);
}
void lspcon_set_infoframes(struct intel_encoder *encoder,
@@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* Set the Colorspace as per the HDMI spec */
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
@@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- /* FIXME actually read this from the hw */
- return 0;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool infoframes_enabled;
+ u32 val = 0;
+ u32 mask, tmp;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+ else
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+ if (infoframes_enabled)
+ val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ if (lspcon->hdr_supported) {
+ tmp = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+ if (tmp & mask)
+ val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+ }
+
+ return val;
}
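The function now reports a bitmask of enabled packet types instead of a
hard-coded 0. A hypothetical caller sketch (the bit helpers exist in this
driver; the checks themselves are illustrative):

    u32 enabled = lspcon_infoframes_enabled(encoder, pipe_config);

    if (enabled & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
            ; /* the AVI infoframe is being sent by the LSPCON */
    if (enabled & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
            ; /* the GMP (HDR metadata) DIP is enabled on the transcoder */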
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
@@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-static bool lspcon_init(struct intel_digital_port *dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
struct intel_dp *dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port)
return true;
}
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
void lspcon_resume(struct intel_digital_port *dig_port)
{
struct intel_lspcon *lspcon = &dig_port->lspcon;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index b03dcb7076d8..e19e10492b05 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,6 +15,8 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
void lspcon_resume(struct intel_digital_port *dig_port);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index b73d51e766ce..f455040fa989 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index d64fce1a17cb..5fdf52643150 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -511,40 +511,79 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
0, user_max);
}
-static u32 intel_panel_compute_brightness(struct intel_connector *connector,
- u32 val)
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness < 0)
return val;
if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val + panel->backlight.min;
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
return val;
}
-static u32 lpt_get_backlight(struct intel_connector *connector)
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_panel_invert_pwm_level(connector, val);
+}
+
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
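With pwm_level_min/max now split from the user-visible min/max, the two helpers
above form a round trip through scale() plus the optional inversion quirk. With
illustrative numbers (user range [0..255], PWM range [20..200], no inversion):

    u32 pwm  = intel_panel_backlight_level_to_pwm(connector, 255);   /* == 200 */
    u32 user = intel_panel_backlight_level_from_pwm(connector, 110); /* ~= 127, rounding aside */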
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 pch_get_backlight(struct intel_connector *connector)
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 i9xx_get_backlight(struct intel_connector *connector)
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -564,23 +603,17 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
return val;
}
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 vlv_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_connector_get_pipe(connector);
-
- return _vlv_get_backlight(dev_priv, pipe);
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector)
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -589,7 +622,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
-static u32 pwm_get_backlight(struct intel_connector *connector)
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_panel *panel = &connector->panel;
struct pwm_state state;
@@ -624,12 +657,12 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
- lbpc = level * 0xfe / panel->backlight.max + 1;
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
@@ -666,7 +699,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
-static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
@@ -681,10 +714,9 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.set(conn_state, level);
+ panel->backlight.funcs->set(conn_state, level);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -726,13 +758,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_unlock(&dev_priv->backlight_lock);
}
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, level);
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
@@ -754,13 +786,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -769,44 +801,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
}
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
tmp & ~BLM_PWM_ENABLE);
}
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp, val;
+ u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -820,14 +852,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
}
}
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -835,7 +867,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
tmp & ~BXT_BLC_PWM_ENABLE);
}
-static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
@@ -870,13 +902,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
- panel->backlight.disable(old_conn_state);
+ panel->backlight.funcs->disable(old_conn_state, 0);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -906,7 +938,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -923,11 +955,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -958,9 +990,9 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -974,7 +1006,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -987,7 +1019,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1001,7 +1033,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -1013,7 +1045,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1028,7 +1060,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1044,11 +1076,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1063,11 +1095,11 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
- ctl = panel->backlight.max << 16;
+ ctl = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
ctl2 = 0;
if (panel->backlight.active_low_pwm)
@@ -1079,7 +1111,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1116,9 +1148,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1133,7 +1165,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1152,9 +1184,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1168,14 +1200,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
-static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- int level = panel->backlight.level;
- level = intel_panel_compute_brightness(connector, level);
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
@@ -1198,7 +1228,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s
panel->backlight.device->props.max_brightness);
}
- panel->backlight.enable(crtc_state, conn_state);
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1233,10 +1263,8 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_lock(&dev_priv->backlight_lock);
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
mutex_unlock(&dev_priv->backlight_lock);
@@ -1567,13 +1595,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
u32 pwm;
- if (!panel->backlight.hz_to_pwm) {
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion not supported\n");
return 0;
}
- pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion failed\n");
@@ -1592,7 +1620,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1609,7 +1637,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
}
/* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
}
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
@@ -1629,29 +1657,27 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
- if (cpu_mode)
- val = pch_get_backlight(connector);
- else
- val = lpt_get_backlight(connector);
if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
@@ -1664,10 +1690,6 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
return 0;
}
@@ -1675,29 +1697,24 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = pch_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
return 0;
@@ -1717,27 +1734,26 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
- panel->backlight.max = ctl >> 17;
+ panel->backlight.pwm_level_max = ctl >> 17;
- if (!panel->backlight.max) {
- panel->backlight.max = get_backlight_max_vbt(connector);
- panel->backlight.max >>= 1;
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
}
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_panel_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
- panel->backlight.enabled = val != 0;
+ panel->backlight.pwm_enabled = val != 0;
return 0;
}
@@ -1746,32 +1762,27 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1780,7 +1791,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
@@ -1789,22 +1800,17 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = _vlv_get_backlight(dev_priv, pipe);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1829,24 +1835,18 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
@@ -1856,7 +1856,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
+ u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
@@ -1869,30 +1869,24 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
-static int pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1916,8 +1910,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
- panel->backlight.max = 100; /* 100% */
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
if (pwm_is_enabled(panel->backlight.pwm)) {
/* PWM is already enabled, use existing settings */
@@ -1925,10 +1919,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
100);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.level = clamp(level, panel->backlight.min,
- panel->backlight.max);
- panel->backlight.enabled = true;
+ level = intel_panel_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
@@ -1944,6 +1936,58 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_panel_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1982,12 +2026,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.setup(intel_connector, pipe);
+ ret = panel->backlight.funcs->setup(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
@@ -2017,6 +2061,94 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel)
panel->backlight.present = false;
}
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
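All of these tables fill the new intel_panel_bl_funcs vtable. Its definition
lives in a header outside this hunk; inferred from the initializers and call
sites above, it plausibly looks like:

    /* Assumed layout, reconstructed from usage in this patch */
    struct intel_panel_bl_funcs {
            int (*setup)(struct intel_connector *connector, enum pipe pipe);
            void (*enable)(const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state,
                           u32 level);
            void (*disable)(const struct drm_connector_state *old_conn_state,
                            u32 level);
            void (*set)(const struct drm_connector_state *conn_state, u32 level);
            u32 (*get)(struct intel_connector *connector, enum pipe pipe);
            u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
    };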
+
/* Set up chip specific backlight functions */
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
@@ -2025,75 +2157,39 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
-
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
if (IS_GEN9_LP(dev_priv)) {
- panel->backlight.setup = bxt_setup_backlight;
- panel->backlight.enable = bxt_enable_backlight;
- panel->backlight.disable = bxt_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.setup = cnp_setup_backlight;
- panel->backlight.enable = cnp_enable_backlight;
- panel->backlight.disable = cnp_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- panel->backlight.setup = lpt_setup_backlight;
- panel->backlight.enable = lpt_enable_backlight;
- panel->backlight.disable = lpt_disable_backlight;
- panel->backlight.set = lpt_set_backlight;
- panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev_priv))
- panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
- panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.setup = pch_setup_backlight;
- panel->backlight.enable = pch_enable_backlight;
- panel->backlight.disable = pch_disable_backlight;
- panel->backlight.set = pch_set_backlight;
- panel->backlight.get = pch_get_backlight;
- panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.setup = pwm_setup_backlight;
- panel->backlight.enable = pwm_enable_backlight;
- panel->backlight.disable = pwm_disable_backlight;
- panel->backlight.set = pwm_set_backlight;
- panel->backlight.get = pwm_get_backlight;
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
- panel->backlight.setup = vlv_setup_backlight;
- panel->backlight.enable = vlv_enable_backlight;
- panel->backlight.disable = vlv_disable_backlight;
- panel->backlight.set = vlv_set_backlight;
- panel->backlight.get = vlv_get_backlight;
- panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
} else if (IS_GEN(dev_priv, 4)) {
- panel->backlight.setup = i965_setup_backlight;
- panel->backlight.enable = i965_enable_backlight;
- panel->backlight.disable = i965_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
- panel->backlight.setup = i9xx_setup_backlight;
- panel->backlight.enable = i9xx_enable_backlight;
- panel->backlight.disable = i9xx_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
}
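
A dispatch sketch (editorial, not part of the patch; the wrapper bodies live earlier in this series): the refactor splits the backlight vtable in two, so a set request flows through the generic PWM layer and only then to the platform table, roughly:

	/* conceptual flow, assuming the intel_pwm_* wrappers forward to pwm_funcs */
	panel->backlight.funcs->set(conn_state, level);          /* intel_pwm_set_backlight() */
	/* ...which in turn calls... */
	panel->backlight.pwm_funcs->set(conn_state, pwm_level);  /* e.g. vlv_set_backlight() */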
enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5b813fe90557..1d340f77bffc 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -49,6 +49,10 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 000000000000..c4867a8020a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_pps.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * See intel_pps_reset_all() for why we need a power domain reference here.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ mutex_unlock(&dev_priv->pps_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return 0;
+}
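
A usage sketch (editorial illustration, not part of the patch): these two helpers are meant to be consumed through the with_intel_pps_lock() macro declared in intel_pps.h below, which keeps the power-domain get, mutex lock, unlock and power-domain put in the correct order even with early loop exits:

	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);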
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+ u32 DP;
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
+
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
+ }
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ if (!pll_enabled) {
+ vlv_force_pll_off(dev_priv, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
+ * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe !=
+ intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ } else {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.pps_pipe;
+
+ pipe = vlv_find_free_pps(dev_priv);
+
+ /*
+ * Didn't find one. This should not happen since there
+ * are two power sequencers and up to two eDP ports.
+ */
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
+ vlv_steal_power_sequencer(dev_priv, pipe);
+ intel_dp->pps.pps_pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe),
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
+
+ return intel_dp->pps.pps_pipe;
+}
+
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int backlight_controller = dev_priv->vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ if (!intel_dp->pps.pps_reset)
+ return backlight_controller;
+
+ intel_dp->pps.pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed, the SW state is fixed and
+ * has been set up during connector init.
+ */
+ pps_init_registers(intel_dp, false);
+
+ return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+ enum port port,
+ vlv_pipe_check pipe_check)
+{
+ enum pipe pipe;
+
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+
+ if (port_sel != PANEL_PORT_SELECT_VLV(port))
+ continue;
+
+ if (!pipe_check(dev_priv, pipe))
+ continue;
+
+ return pipe;
+ }
+
+ return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* try to find a pipe with this port selected */
+ /* first pick one where the panel is on */
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_vdd_on);
+ /* didn't find one? pick one with just the correct port */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_any);
+
+ /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
+ return;
+
+ /*
+ * We can't grab pps_mutex here due to deadlock with power_domain
+ * mutex when power_domain functions are called while holding pps_mutex.
+ * That also means that in order to use pps_pipe the code needs to
+ * hold both a power domain reference and pps_mutex, and the power domain
+ * reference get/put must be done while _not_ holding pps_mutex.
+ * pps_{lock,unlock}() do these steps in the correct order, so they
+ * should always be used.
+ */
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
+ intel_dp->pps.pps_reset = true;
+ else
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pps_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_GEN9_LP(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
+ regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ }
+}
+
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_verify_state(intel_dp);
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
+ /* take the difference of the current time and the panel power off
+ * time and then make the panel wait for t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+ /* When we disable the VDD override bit last we have to do the manual
+ * wait. */
+ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
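
A worked example of the wait above (editorial, with hypothetical numbers): if panel_power_cycle_delay is 500 ms and the panel was powered off 300 ms ago, ktime_ms_delta() yields 300, so only the remaining 200 ms are slept before the IDLE_CYCLE status poll:

	s64 off_duration = 300;  /* hypothetical ms since power off */
	int cycle_delay = 500;   /* hypothetical t11_t12 in ms */

	if (off_duration < cycle_delay)
		/* sleeps the remaining 200 ms */
		wait_remaining_ms_from_jiffies(jiffies, cycle_delay - off_duration);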
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+ intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+ intel_dp->pps.backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 control;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
+ return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
+ cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+ intel_dp->pps.want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+ /*
+ * If the panel wasn't on, delay before accessing the aux channel
+ */
+ if (!edp_have_panel_power(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ msleep(intel_dp->pps.panel_power_up_delay);
+ }
+
+ return need_to_disable;
+}
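
A pairing sketch (editorial): the return value reports whether this call actually turned VDD on, so a nested user only schedules the matching off for its own on, e.g. around an AUX transfer:

	with_intel_pps_lock(intel_dp, wakeref) {
		bool vdd = intel_pps_vdd_on_unlocked(intel_dp);

		/* ... perform the AUX transfer ... */

		if (vdd)
			intel_pps_vdd_off_unlocked(intel_dp, false);
	}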
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool vdd;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ vdd = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port =
+ dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ /* Make sure the sequencer is idle before allowing subsequent activity */
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if ((pp & PANEL_POWER_ON) == 0)
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_pps *pps = container_of(to_delayed_work(__work),
+ struct intel_pps, panel_vdd_work);
+ struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->pps.want_panel_vdd)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ }
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with intel_pps_vdd_on_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ if (sync)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
+ return;
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
+ if (IS_GEN(dev_priv, 5)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ pp |= PANEL_POWER_ON;
+ if (!IS_GEN(dev_priv, 5))
+ pp |= PANEL_POWER_RESET;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->pps.last_power_on = jiffies;
+
+ if (IS_GEN(dev_priv, 5)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_off(intel_dp);
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ /* We got a reference when we enabled the VDD. */
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ intel_dp->pps.last_backlight_off = jiffies;
+ edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ is_enabled = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ if (is_enabled == enable)
+ return;
+
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable)
+ intel_pps_backlight_on(intel_dp);
+ else
+ intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
+
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ if (intel_dp->pps.pps_pipe != pipe)
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+}
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously, make sure to turn off vdd there while
+ * we still have control of it.
+ */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+
+ /*
+ * We may be stealing the power sequencer from another port.
+ */
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+ intel_dp->pps.active_pipe = crtc->pipe;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /* now it's all ours */
+ intel_dp->pps.pps_pipe = crtc->pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+}
+
+static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+bool intel_pps_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp->pps.last_power_on = jiffies;
+ intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, pp_ctl;
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ pp_ctl = ilk_get_pp_control(intel_dp);
+
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+ /* Pull timing values out of registers */
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
+
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
+
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ } else {
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ }
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+ intel_pps_readout_hw_state(intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
+ intel_pps_readout_hw_state(intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
+
+ vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state("vbt", &vbt);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->pps.backlight_on_delay = get_delay(t8);
+ intel_dp->pps.backlight_off_delay = get_delay(t9);
+ intel_dp->pps.panel_power_down_delay = get_delay(t10);
+ intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->pps.panel_power_up_delay,
+ intel_dp->pps.panel_power_down_delay,
+ intel_dp->pps.panel_power_cycle_delay);
+
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->pps.backlight_on_delay,
+ intel_dp->pps.backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
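
A worked unit check for t11_t12 (editorial): the spec limit above is (510 + 100) * 10 = 6100 hw units of 100 us, i.e. 610 ms; if that value wins the max() selection, the roundup() at the end lifts it to the next 100 ms step:

	roundup(6100, 100 * 10) == 7000	/* i.e. 700 ms, the hw granularity */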
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, port_sel = 0;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ struct pps_registers regs;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * intel_pps_vdd_on_unlocked() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ilk_get_pp_control(intel_dp);
+
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ }
+
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ port_sel = PANEL_PORT_SELECT_VLV(port);
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ switch (port) {
+ case PORT_A:
+ port_sel = PANEL_PORT_SELECT_DPA;
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
+ port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
+ }
+
+ pp_on |= port_sel;
+
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+ /*
+ * Compute the divisor for the pp clock, simply matching the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
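
A worked example of the divisor formula (editorial; this assumes rawclk_freq is in kHz, as elsewhere in i915): with a 24 MHz raw clock,

	div = 24000 / 1000;          /* 24 */
	ref = (100 * div) / 2 - 1;   /* 1199 */

so PP_REFERENCE_DIVIDER would be programmed to 1199 on such a machine.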
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /*
+ * Reinit the power sequencer also on the resume path, in case
+ * BIOS did something nasty with it.
+ */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_pps_vdd_sanitize(intel_dp);
+ }
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+ INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+ pps_init_timestamps(intel_dp);
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+ * everywhere registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+ }
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+ i915->pps_mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->pps_mmio_base = VLV_PPS_BASE;
+ else
+ i915->pps_mmio_base = PPS_BASE;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 000000000000..fbbcca782e7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
+#define with_intel_pps_lock(dp, wf) \
+ for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_power(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b3631b722de3..850cb7f5b332 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,9 +28,10 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -305,7 +306,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
@@ -811,6 +812,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
&crtc_state->hw.adjusted_mode;
int psr_setup_time;
+ /*
+ * Current PSR panels don't work reliably with VRR enabled,
+ * so if VRR is enabled, do not enable PSR.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
if (!CAN_PSR(dev_priv))
return;
@@ -1185,7 +1193,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 val;
+ const struct drm_rect *clip;
+ u32 val, offset;
+ int ret, x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
@@ -1196,16 +1206,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!val || plane->id == PLANE_CURSOR)
return;
- val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
- val = plane_state->color_plane[color_plane].y << 16;
- val |= plane_state->color_plane[color_plane].x;
+ /* TODO: consider auxiliary surfaces */
+ x = plane_state->uapi.src.x1 >> 16;
+ y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+ ret);
+ val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
/* Sizes are 0 based */
- val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+ val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
@@ -1237,9 +1256,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
crtc_state->psr2_man_track_ctl = val;
}
@@ -1264,8 +1285,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
- struct drm_rect pipe_clip = { .y1 = -1 };
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -1277,13 +1298,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
+ /*
+ * Calculate the minimal selective fetch area of each plane and
+ * the overall pipe damaged area.
+ * In the next loop the plane selective fetch area will actually be set
+ * using the whole pipe damaged area.
+ */
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect temp;
+ struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_mode_rect *damaged_clips;
+ u32 num_clips, j;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
+ if (!new_plane_state->uapi.visible &&
+ !old_plane_state->uapi.visible)
+ continue;
+
/*
* TODO: Not clear how to handle planes with negative position,
* also planes are not updated if they have a negative X
@@ -1295,18 +1328,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
break;
}
- if (!new_plane_state->uapi.visible)
- continue;
+ num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
/*
- * For now doing a selective fetch in the whole plane area,
- * optimizations will come in the future.
+ * If the visibility or the plane position changed, mark the whole
+ * plane area as damaged, as it needs to be completely redrawn in
+ * both the new and old positions.
*/
- temp.y1 = new_plane_state->uapi.dst.y1;
- temp.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &temp);
+ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+ !drm_rect_equals(&new_plane_state->uapi.dst,
+ &old_plane_state->uapi.dst)) {
+ if (old_plane_state->uapi.visible) {
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+ (!num_clips &&
+ new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+ /*
+ * If the plane doesn't have damaged areas but the
+ * framebuffer changed or alpha changed, mark the whole
+ * plane area as damaged.
+ */
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ continue;
+ }
+
+ drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
+ for (j = 0; j < num_clips; j++) {
+ struct drm_rect clip;
+
+ clip.x1 = damaged_clips[j].x1;
+ clip.y1 = damaged_clips[j].y1;
+ clip.x2 = damaged_clips[j].x2;
+ clip.y2 = damaged_clips[j].y2;
+ if (drm_rect_intersect(&clip, &src))
+ clip_area_update(&damaged_area, &clip);
+ }
+
+ if (damaged_area.y1 == -1)
+ continue;
+
+ damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (full_update)
+ goto skip_sel_fetch_set_loop;
+
+ /* The pipe damaged area must be aligned to 4 lines */
+ pipe_clip.y1 -= pipe_clip.y1 % 4;
+ if (pipe_clip.y2 % 4)
+ pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+
+ /*
+ * Now that we have the pipe damaged area, check if it intersects
+ * with each plane; if it does, set the plane selective fetch area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect *sel_fetch_area, inter;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+ !new_plane_state->uapi.visible)
+ continue;
+
+ inter = pipe_clip;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+ sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
}
+skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
return 0;
}
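
An alignment example (editorial): for a damaged area spanning lines 6..9, the rounding above yields y1 = 6 - 6 % 4 = 4 and y2 = (9 / 4 + 1) * 4 = 12, a 4-line-aligned region that satisfies the drm_WARN_ON() added to psr2_man_trk_ctl_calc().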
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 3da2544fa1c0..993543334a1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -49,6 +49,8 @@
#include "intel_psr.h"
#include "intel_dsi.h"
#include "intel_sprite.h"
+#include "i9xx_plane.h"
+#include "intel_vrr.h"
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -61,13 +63,15 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-/* FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
/**
* intel_pipe_update_start() - start update of a set of display registers
@@ -97,9 +101,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
- vblank_start = adjusted_mode->crtc_vblank_start;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -187,6 +192,36 @@ irq_disable:
local_irq_disable();
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
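
A bucketing sketch (editorial): delta is in nanoseconds, so delta >> 9 counts roughly half-microseconds and ilog2() picks a power-of-two histogram bin; a 100 us update, for instance, lands in bin 7:

	h = ilog2(100000 >> 9);	/* ilog2(195) == 7 */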
+
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
@@ -235,6 +270,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
local_irq_enable();
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
if (intel_vgpu_active(dev_priv))
return;
@@ -249,15 +287,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
- else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
- VBLANK_EVASION_TIME_US)
- drm_warn(&dev_priv->drm,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(pipe),
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- VBLANK_EVASION_TIME_US);
-#endif
+
+ dbg_vblank_evade(crtc, end_vbl_time);
}
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
@@ -710,7 +741,8 @@ icl_program_input_csc(struct intel_plane *plane,
static void
skl_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned long irqflags;
@@ -721,6 +753,9 @@ skl_plane_async_flip(struct intel_plane *plane,
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
@@ -806,6 +841,10 @@ skl_program_plane(struct intel_plane *plane,
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
skl_write_plane_wm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
@@ -897,6 +936,28 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
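+/* Arm/disarm the per-plane flip done interrupt used to complete async flips. */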
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1790,7 +1851,26 @@ g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- return 16384;
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 16 * 1024;
+}
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 16 * 1024);
}
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
@@ -2305,7 +2385,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@@ -2795,6 +2876,7 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2803,6 +2885,7 @@ static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2993,6 +3076,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
break;
default:
return false;
@@ -3229,7 +3313,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- plane->async_flip = skl_plane_async_flip;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -3337,11 +3427,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->max_stride = i9xx_plane_max_stride;
plane->update_plane = vlv_update_plane;
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@@ -3355,16 +3445,18 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ plane->max_stride = hsw_sprite_max_stride;
plane->min_cdclk = hsw_plane_min_cdclk;
- else
+ } else {
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = ivb_sprite_min_cdclk;
+ }
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -3372,11 +3464,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = g4x_update_plane;
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index cd2104ba1ca1..76126dd8d584 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,6 +17,16 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
+/*
+ * FIXME: We should instead only take spinlocks once for the entire update
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 8b6f16f9d0d1..2cefc13535a0 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -232,7 +232,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 49b4b5fca941..187ec573de59 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
+ RKL_DDC_BUS_DDI_D = 0x3,
+ RKL_DDC_BUS_DDI_E,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index e2716a67b281..f58cc5700784 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
else if (vdsc_cfg->bits_per_component == 12)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
- /* RC_MODEL_SIZE is a constant across all configurations */
- vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
@@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
- pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 000000000000..a9c2b2fd9252
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp;
+ const struct drm_display_info *info = &connector->display_info;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ /*
+ * The DP sink is capable of VRR video timings if
+ * the Ignore MSA bit is set in the DPCD.
+ * The EDID monitor range should also span at least 10 Hz for a
+ * reasonable Adaptive Sync or Variable Refresh Rate end user
+ * experience.
+ */
+ return HAS_VRR(i915) &&
+ drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
+ info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+ int i;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->uapi.vrr_enabled !=
+ old_crtc_state->uapi.vrr_enabled)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+}
+
+/*
+ * Without VRR, registers get latched at:
+ * vblank_start
+ *
+ * With VRR, the earliest point registers can get latched is:
+ * intel_vrr_vmin_vblank_start(), which, to maintain the correct
+ * min vtotal, is >= vblank_start + 1
+ *
+ * The latest point at which registers can get latched is the vmax
+ * decision boundary:
+ * intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable 0-3.
+ */
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* The hw imposes the extra scanline before frame start */
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ /* The min vblank is actually determined by flipline, which is always >= vmin+1 */
+ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
+ if (!intel_vrr_is_capable(&connector->base))
+ return;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+ if (!crtc_state->uapi.vrr_enabled)
+ return;
+
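+ /*
+ * vtotal = pixel clock / (htotal * refresh rate); crtc_clock is in
+ * kHz, hence the * 1000. vmin follows from the sink's max refresh
+ * rate, vmax from its min refresh rate.
+ */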
+ vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+ adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+
+ vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ if (vmin >= vmax)
+ return;
+
+ /*
+ * flipline determines the min vblank length the hardware will
+ * generate, and flipline>=vmin+1, hence we reduce vmin by one
+ * to make sure we can get the actual min vblank length.
+ */
+ crtc_state->vrr.vmin = vmin - 1;
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.enable = true;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
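+ /* The VMIN/VMAX/FLIPLINE registers hold the line count minus one. */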
+ intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+ intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ if (!old_crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+ crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+ crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 000000000000..fac01bf4ab50
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_crtc;
+
+bool intel_vrr_is_capable(struct drm_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
new file mode 100644
index 000000000000..9e508e7d4629
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "dma_resv_utils.h"
+
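+/*
+ * Opportunistically prune the fences of a reservation object: if the lock
+ * can be taken without blocking and everything has already signaled,
+ * installing a NULL exclusive fence drops the signaled shared fences too.
+ */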
+void dma_resv_prune(struct dma_resv *resv)
+{
+ if (dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
new file mode 100644
index 000000000000..b9d8fb5f8367
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DMA_RESV_UTILS_H
+#define DMA_RESV_UTILS_H
+
+struct dma_resv;
+
+void dma_resv_prune(struct dma_resv *resv);
+
+#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4fd38101bb56..4d2f40cf237b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -72,6 +72,8 @@
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h" /* virtual_engine */
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return e;
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+void i915_gem_context_release(struct kref *ref)
{
- GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree_rcu(ctx, rcu);
}
-static void contexts_free_all(struct llist_node *list)
-{
- struct i915_gem_context *ctx, *cn;
-
- llist_for_each_entry_safe(ctx, cn, list, free_link)
- i915_gem_context_free(ctx);
-}
-
-static void contexts_flush_free(struct i915_gem_contexts *gc)
-{
- contexts_free_all(llist_del_all(&gc->free_list));
-}
-
-static void contexts_free_worker(struct work_struct *work)
-{
- struct i915_gem_contexts *gc =
- container_of(work, typeof(*gc), free_work);
-
- contexts_flush_free(gc);
-}
-
-void i915_gem_context_release(struct kref *ref)
-{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
-
- trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &gc->free_list))
- schedule_work(&gc->free_work);
-}
-
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
}
if (i915_request_is_active(rq)) {
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
*active = locked;
ret = true;
}
@@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
struct intel_engine_cs *engine = NULL;
struct i915_request *rq;
+ if (intel_context_has_inflight(ce))
+ return intel_context_inflight(ce);
+
if (!ce->timeline)
return NULL;
@@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
mutex_unlock(&ctx->mutex);
/*
@@ -740,7 +717,8 @@ err_free:
}
static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+ bool *user_engines)
{
struct i915_gem_engines *engines;
@@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
engines = rcu_dereference(ctx->engines);
GEM_BUG_ON(!engines);
+ if (user_engines)
+ *user_engines = i915_gem_context_user_engines(ctx);
+
+ /* successful await => strong mb */
if (unlikely(!i915_sw_fence_await(&engines->fence)))
continue;
@@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
struct intel_context *ce;
int err = 0;
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
@@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
!HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the stale contexts */
- contexts_flush_free(&i915->gem.contexts);
-
ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
@@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
INIT_LIST_HEAD(&gc->list);
-
- INIT_WORK(&gc->free_work, contexts_free_worker);
- init_llist_head(&gc->free_list);
}
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- drm_dbg(&i915->drm, "%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
- flush_work(&i915->gem.contexts.free_work);
- rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -988,7 +955,6 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *i915 = file_priv->dev_priv;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file)
xa_for_each(&file_priv->vm_xa, idx, vm)
i915_vm_put(vm);
xa_destroy(&file_priv->vm_xa);
-
- contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
if (!e) {
i915_active_release(&cb->base);
return -ENOENT;
@@ -1879,27 +1843,6 @@ replace:
return 0;
}
-static struct i915_gem_engines *
-__copy_engines(struct i915_gem_engines *e)
-{
- struct i915_gem_engines *copy;
- unsigned int n;
-
- copy = alloc_engines(e->num_engines);
- if (!copy)
- return ERR_PTR(-ENOMEM);
-
- for (n = 0; n < e->num_engines; n++) {
- if (e->engines[n])
- copy->engines[n] = intel_context_get(e->engines[n]);
- else
- copy->engines[n] = NULL;
- }
- copy->num_engines = n;
-
- return copy;
-}
-
static int
get_engines(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx,
struct i915_context_param_engines __user *user;
struct i915_gem_engines *e;
size_t n, count, size;
+ bool user_engines;
int err = 0;
- err = mutex_lock_interruptible(&ctx->engines_mutex);
- if (err)
- return err;
+ e = __context_engines_await(ctx, &user_engines);
+ if (!e)
+ return -ENOENT;
- e = NULL;
- if (i915_gem_context_user_engines(ctx))
- e = __copy_engines(i915_gem_context_engines(ctx));
- mutex_unlock(&ctx->engines_mutex);
- if (IS_ERR_OR_NULL(e)) {
+ if (!user_engines) {
+ i915_sw_fence_complete(&e->fence);
args->size = 0;
- return PTR_ERR_OR_ZERO(e);
+ return 0;
}
count = e->num_engines;
@@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx,
args->size = size;
err_free:
- free_engines(e);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst,
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
- struct i915_gem_engines *clone;
+ struct i915_gem_engines *clone, *e;
bool user_engines;
unsigned long n;
+ e = __context_engines_await(src, &user_engines);
+ if (!e)
+ return -ENOENT;
+
clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
@@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst,
}
}
clone->num_engines = n;
-
- user_engines = i915_gem_context_user_engines(src);
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
/* Serialised by constructor */
engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst,
return 0;
err_unlock:
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index a133f92bbedb..b5c908f3f4f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ae14ca24a11f..1449f54924e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -108,7 +108,6 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */
struct list_head link;
- struct llist_node free_link;
/**
* @ref: reference count
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 000000000000..45d60e3d98e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct intel_memory_region *mr,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ u32 handle;
+ u64 size;
+ int ret;
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
+ if (size == 0)
+ return -EINVAL;
+
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ /* Allocate the new object */
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(size != obj->base.size);
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *handle_p = handle;
+ *size_p = size;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ enum intel_memory_type mem_type;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
+ &args->size, &args->handle);
+}
+
+/**
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create *args = data;
+
+ i915_gem_flush_free_objects(i915);
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
+ &args->size, &args->handle);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3d435bfff764..36f54cedaaeb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,6 +5,7 @@
*/
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -15,13 +16,58 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
+ }
+ spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -370,6 +416,7 @@ retry:
}
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
@@ -409,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -574,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
@@ -625,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required).
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bd3046e5a934..d70ca36f74f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,6 +15,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
@@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
int err;
if (!pool) {
- pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+ cache->has_llc ?
+ I915_MAP_WB :
+ I915_MAP_WC);
if (IS_ERR(pool))
return PTR_ERR(pool);
}
@@ -1287,10 +1289,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_pool;
- cmd = i915_gem_object_pin_map(pool->obj,
- cache->has_llc ?
- I915_MAP_FORCE_WB :
- I915_MAP_FORCE_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err_pool;
@@ -2459,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb)
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
eb->batch_pool = pool;
@@ -2535,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
err = eb_move_to_gpu(eb);
if (err)
return err;
@@ -2575,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return err;
}
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
return 0;
}
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(&i915->gt));
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 932ee21e6609..194f35342710 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
size, flags);
}
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
@@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3..036d53c01de9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 00d24000b5e8..70f798405f7f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,13 +25,13 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_memcpy.h"
#include "i915_trace.h"
static struct i915_global_object {
@@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains)
-{
- struct i915_vma *vma;
-
- assert_object_held(obj);
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (i915_vma_unset_ggtt_write(vma))
- intel_gt_flush_ggtt_writes(vma->vm->gt);
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
@@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
}
}
+static void
+i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void *src_map;
+ void *src_ptr;
+
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(src_ptr, size);
+ memcpy(dst, src_ptr, size);
+
+ kunmap_atomic(src_map);
+}
+
+static void
+i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void __iomem *src_map;
+ void __iomem *src_ptr;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
+
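+ /* Map the backing page write-combined; prefer the accelerated WC read path when available, falling back to memcpy_fromio. */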
+ src_map = io_mapping_map_wc(&obj->mm.region->iomap,
+ dma - obj->mm.region->region.start,
+ PAGE_SIZE);
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
+ memcpy_fromio(dst, src_ptr, size);
+
+ io_mapping_unmap(src_map);
+}
+
+/**
+ * i915_gem_object_read_from_page - read data from the page of a GEM object
+ * @obj: GEM object to read from
+ * @offset: offset within the object
+ * @dst: buffer to store the read data
+ * @size: size to read
+ *
+ * Reads data from @obj at the specified offset. The requested region to read
+ * from can't cross a page boundary. The caller must ensure that @obj pages
+ * are pinned and that @obj is synced wrt. any related writes.
+ *
+ * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * unsupported.
+ */
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ GEM_BUG_ON(offset >= obj->base.size);
+ GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_gem_object_has_struct_page(obj))
+ i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
+ else if (i915_gem_object_has_iomem(obj))
+ i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 4556afe18f16..d0ae834d787a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -188,6 +188,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -201,6 +219,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+}
+
+static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
@@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains);
-
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
@@ -511,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr);
+
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout);
@@ -539,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index aee7ad3cc3c6..d6dac21fce0b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
@@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
@@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e2d9b7e1e152..0438e00d4ca7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops {
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
enum i915_mmap_type {
I915_MMAP_TYPE_GTT = 0,
I915_MMAP_TYPE_WC,
@@ -142,8 +150,6 @@ struct drm_i915_gem_object {
*/
struct list_head obj_link;
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -167,6 +173,7 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY BIT(2)
+#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -275,12 +282,6 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
} mm;
/** Record of address bit 17 of each page at last unbind. */
@@ -295,6 +296,8 @@ struct drm_i915_gem_object {
struct work_struct *work;
} userptr;
+ struct drm_mm_node *stolen;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e2c7b2a7895f..43028f3539a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ bool shrinkable;
int i;
lockdep_assert_held(&obj->mm.lock);
@@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
@@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
- if (i915_gem_object_is_shrinkable(obj)) {
+ shrinkable = i915_gem_object_is_shrinkable(obj);
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ shrinkable = false;
+ }
+
+ if (shrinkable) {
struct list_head *list;
unsigned long flags;
@@ -238,7 +241,7 @@ unlock:
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
struct page *stack[32], **pages = stack, *page;
@@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
vaddr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack)
kvfree(pages);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
resource_size_t iomap = obj->mm.region->iomap.base -
obj->mm.region->region.start;
@@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
void *vaddr;
if (type != I915_MAP_WC)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (n_pfn > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
if (!pfns)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
if (pfns != stack)
kvfree(pfns);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
/* get, pin, and map the pages of the object into kernel space */
@@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_unlock;
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+ }
smp_mb__before_atomic();
}
@@ -362,7 +369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
- err = -EBUSY;
+ ptr = ERR_PTR(-EBUSY);
goto err_unpin;
}
@@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!ptr) {
if (GEM_WARN_ON(type == I915_MAP_WC &&
!static_cpu_has(X86_FEATURE_PAT)))
- ptr = NULL;
+ ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
else
ptr = i915_gem_object_map_pfn(obj, type);
- if (!ptr) {
- err = -ENOMEM;
+ if (IS_ERR(ptr))
goto err_unpin;
- }
obj->mm.mapping = page_pack_bits(ptr, type);
}
@@ -393,8 +398,6 @@ out_unlock:
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(err);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3a4dfe2ef1da..3c0b157e2a35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops == &i915_gem_phys_ops)
return 0;
- if (obj->ops != &i915_gem_shmem_ops)
+ if (!i915_gem_object_is_shmem(obj))
return -EINVAL;
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
@@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- if (obj->mm.quirked) {
+ if (i915_gem_object_has_tiling_quirk(obj)) {
err = -EFAULT;
goto err_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 40d3e40500fa..215766cc22bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,6 +11,13 @@
#include "i915_drv.h"
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+ pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
- return list_first_entry_or_null(list,
- struct drm_i915_gem_object,
- mm.link);
-}
-
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
NULL
}, **phase;
unsigned long flags;
+ bool flush = false;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- LIST_HEAD(keep);
-
- while ((obj = first_mm_object(*phase))) {
- list_move_tail(&obj->mm.link, &keep);
-
- /* Beware the background _i915_gem_free_objects */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
- i915_gem_object_put(obj);
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
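+ /* Mark all objects for CPU access; flush caches once afterwards if any non-coherent object was outside the CPU read domain. */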
+ list_for_each_entry(obj, *phase, mm.link) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+ __start_cpu_write(obj); /* presume auto-hibernate */
}
-
- list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ if (flush)
+ wbinvd_on_all_cpus();
}
void i915_gem_resume(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384d7e0e..3e3dad22a683 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
+ const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size;
@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!st)
return -ENOMEM;
- if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
- GEM_BUG_ON(overflows_type(block_size, sg->length));
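+ /* Split each block into sg segments of at most max_segment, merging with the previous entry while physically contiguous. */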
+ while (block_size) {
+ u64 len;
- if (offset != prev_end ||
- add_overflows_t(typeof(sg->length), sg->length, block_size)) {
- if (st->nents) {
- sg_page_sizes |= sg->length;
- sg = __sg_next(sg);
+ if (offset != prev_end || sg->length >= max_segment) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = 0;
+ sg->length = 0;
+ st->nents++;
}
- sg_dma_address(sg) = mem->region.start + offset;
- sg_dma_len(sg) = block_size;
+ len = min(block_size, max_segment - sg->length);
+ sg->length += len;
+ sg_dma_len(sg) += len;
- sg->length = block_size;
+ offset += len;
+ block_size -= len;
- st->nents++;
- } else {
- sg->length += block_size;
- sg_dma_len(sg) += block_size;
+ prev_end = offset;
}
-
- prev_end = offset + block_size;
}
sg_page_sizes |= sg->length;
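The rewritten loop above caps every scatterlist segment at i915_sg_segment_size() and starts a new segment on any physical discontiguity. The governing condition, inverted into a stand-alone predicate for clarity (seg_can_extend() is an illustrative name, not driver API):

static bool seg_can_extend(const struct scatterlist *sg,
			   u64 offset, u64 prev_end, u64 max_segment)
{
	/* Contiguous with the previous block and still under the cap? */
	return offset == prev_end && sg->length < max_segment;
}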
@@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ int err;
/*
* NB: Our use of resource_size_t for the size stems from using struct
@@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = mem->ops->create_object(mem, size, flags);
- if (!IS_ERR(obj))
- trace_i915_gem_object_create(obj);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ err = mem->ops->init_object(mem, obj, size, flags);
+ if (err)
+ goto err_object_free;
+
+ trace_i915_gem_object_create(obj);
return obj;
+
+err_object_free:
+ i915_gem_object_free(obj);
+ return ERR_PTR(err);
}
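The hunk above switches object creation to a two-phase scheme: the core allocates the generic object, the region backend initialises it via the new init_object() hook, and the core frees it on failure. A skeletal backend under that contract (backend_setup() is a hypothetical placeholder for region-specific work):

static int example_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size, unsigned int flags)
{
	int err;

	err = backend_setup(obj, size, flags); /* hypothetical */
	if (err)
		return err; /* caller frees obj via i915_gem_object_free() */

	i915_gem_object_init_memory_region(obj, mem, 0);
	return 0;
}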
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 75e8b71c18b9..cf83c208688c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915,
return 0;
}
-static struct drm_i915_gem_object *
-create_shmem(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int shmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
ret = __create_shmem(i915, &obj->base, size);
if (ret)
- goto fail;
+ return ret;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_I965GM(i915) || IS_I965G(i915)) {
@@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
+ return 0;
}
struct drm_i915_gem_object *
@@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem)
static const struct intel_memory_region_ops shmem_region_ops = {
.init = init_shmem,
.release = release_shmem,
- .create_object = create_shmem,
+ .init_object = shmem_object_init,
};
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
@@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
PAGE_SIZE, 0,
&shmem_region_ops);
}
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_shmem_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index dc8f052a0ffe..c2dba1cd9532 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_requests.h"
+#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
mutex_unlock(&obj->mm.lock);
}
+ dma_resv_prune(obj->base.resv);
+
scanned += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
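dma_resv_prune() is introduced elsewhere in this series (dma_resv_utils.c, not shown here). Judging from the open-coded sequence it replaces in i915_gem_wait.c further down, its body is approximately:

void dma_resv_prune(struct dma_resv *resv)
{
	/* Opportunistic: skip if the lock is contended. */
	if (dma_resv_trylock(resv)) {
		if (dma_resv_test_signaled_rcu(resv, true))
			dma_resv_add_excl_fence(resv, NULL); /* drop all fences */
		dma_resv_unlock(resv);
	}
}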
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 29bffc6afcc1..551935348ad8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
-
- i915_gem_object_release_memory_region(obj);
-
i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
+
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.release = i915_gem_object_release_stolen,
};
-static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct intel_memory_region *mem,
- struct drm_mm_node *stolen)
+static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
- struct drm_i915_gem_object *obj;
unsigned int cache_level;
- int err = -ENOMEM;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- goto err;
+ int err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
@@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
err = i915_gem_object_pin_pages(obj);
if (err)
- goto cleanup;
+ return err;
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-cleanup:
- i915_gem_object_free(obj);
-err:
- return ERR_PTR(err);
+ return 0;
}
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (size == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
goto err_remove;
- return obj;
+ return 0;
err_remove:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ret;
}
struct drm_i915_gem_object *
@@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem)
static const struct intel_memory_region_ops i915_region_stolen_ops = {
.init = init_stolen,
.release = release_stolen,
- .create_object = _i915_gem_object_create_stolen,
+ .init_object = _i915_gem_object_stolen_init,
};
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -772,16 +758,31 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
goto err_free;
}
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ obj = i915_gem_object_alloc();
+ if (!obj) {
+ obj = ERR_PTR(-ENOMEM);
goto err_stolen;
+ }
+
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret) {
+ obj = ERR_PTR(ret);
+ goto err_object_free;
+ }
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
+err_object_free:
+ i915_gem_object_free(obj);
err_stolen:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return obj;
}
+
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_object_stolen_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 61e028063f9f..b03489706796 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
resource_size_t stolen_offset,
resource_size_t size);
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
+
#define I915_GEM_STOLEN_BIAS SZ_128K
#endif /* __I915_GEM_STOLEN_H__ */
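Like i915_gem_object_is_shmem() above, this predicate classifies an object by its ops vtable rather than a flag. A hedged usage sketch (the error choice is illustrative only):

	/* stolen memory is invisible to the CPU; reject direct mmap */
	if (i915_gem_object_is_stolen(obj))
		return -ENXIO;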
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ffcaee74a249..d589d3d81085 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
mutex_unlock(&obj->mm.lock);
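The quirk accessors used above replace the open-coded obj->mm.quirked bool, and the explicit page pin/unpin is replaced by the shrinkable/unshrinkable transitions. Their definitions are not part of this excerpt; one plausible shape, assuming a dedicated flag bit (both the bit name and the storage are assumptions):

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags); /* assumed */
}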
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 8af55cd3e690..4b9856d5ba14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -5,10 +5,12 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>
#include "gt/intel_engine.h"
+#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
* Opportunistically prune the fences iff we know they have *all* been
* signaled.
*/
- if (prune_fences && dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
+ if (prune_fences)
+ dma_resv_prune(resv);
return timeout;
}
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
@@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence,
rq = to_request(fence);
engine = rq->engine;
- local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(rq, attr);
rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ if (dma_fence_is_signaled(fence))
+ return;
+
+ local_bh_disable();
+
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
int i;
for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
+ fence_set_priority(array->fences[i], attr);
+ } else if (__dma_fence_is_chain(fence)) {
+ struct dma_fence *iter;
+
+ /* The chain is ordered; if we boost the last, we boost all */
+ dma_fence_chain_for_each(iter, fence) {
+ fence_set_priority(to_dma_fence_chain(iter)->fence,
+ attr);
+ break;
+ }
+ dma_fence_put(iter);
} else {
- __fence_set_priority(fence, attr);
+ fence_set_priority(fence, attr);
}
+
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
int
@@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
int ret;
ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ &excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
+ i915_gem_fence_wait_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
@@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
if (excl) {
- fence_set_priority(excl, attr);
+ i915_gem_fence_wait_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index a768ec61e966..2fb501a78a85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct scatterlist *sg, *src, *end;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1f35e71429b4..aacf4856ccb4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
+ /*
+ * The dma-api is like a box of chocolates when it comes to the
+ * alignment of dma addresses, however for LMEM we have total control
+ * and so can guarantee alignment, likewise when we allocate our blocks
+ * they should appear in descending order, and if we know that we align
+ * to the largest page size for the GTT address, we should be able to
+ * assert that if we see 2M physical pages then we should also get 2M
+ * GTT pages. If we don't then something might be wrong in our
+ * construction of the backing pages.
+ *
+ * Maintaining alignment is required to utilise huge pages in the ppGTT.
+ */
+ if (i915_gem_object_is_lmem(obj) &&
+ IS_ALIGNED(vma->node.start, SZ_2M) &&
+ vma->page_sizes.sg & SZ_2M &&
+ vma->page_sizes.gtt < SZ_2M) {
+ pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
+ vma->page_sizes.sg, vma->page_sizes.gtt);
+ err = -EINVAL;
+ }
+
if (obj->mm.page_sizes.gtt) {
pr_err("obj->page_sizes.gtt(%u) should never be set\n",
obj->mm.page_sizes.gtt);
@@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg)
unsigned int flags;
} backends[] = {
{ igt_create_system, 0, },
+ { igt_create_local, 0, },
{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
struct {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 4e36d4897ea6..6a674a7994df 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
- struct rnd_state prng;
+ I915_RND_STATE(prng);
IGT_TIMEOUT(end);
u32 *vaddr;
int err = 0;
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7049a6bbc03d..1117d2a44518 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d27d87a678c8..d429c7643ff2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index e21b5023ca7d..d6783061bc72 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 174a24553322..d4f4452ce5ed 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%dns)\n",
+ seq_printf(m, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
@@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "GPU busy? %s, %llums\n",
+ yesno(gt->awake),
+ ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0..e08dff376339 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
#include "intel_gt.h"
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
{
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
u32 ecochk;
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
}
void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index e961ad6a3129..8551e6de50e8 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -40,7 +40,7 @@ struct batch_vals {
u32 size;
};
-static inline int num_primitives(const struct batch_vals *bv)
+static int num_primitives(const struct batch_vals *bv)
{
/*
* We need to saturate the GPU with work in order to dispatch
@@ -353,19 +353,21 @@ static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 8);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
/* ivb: Stall before STATE_CACHE_INVALIDATE */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
+ *cs++ = 0;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
+ *cs++ = 0;
batch_advance(batch, cs);
}
@@ -397,6 +399,7 @@ static void emit_batch(struct i915_vma * const vma,
batch_add(&cmds, 0xffff0000);
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
/* Switch to the media pipeline and our base address */
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 000000000000..07ba524da90b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_lrc.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ bool vf_flush_wa = false, dc_flush_wa = false;
+ u32 *cs, flags = 0;
+ int len;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN(rq->engine->i915, 9))
+ vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
+ }
+
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv reg\n");
+ return INVALID_MMIO_REG;
+}
+
+static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 8 + 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ cmd = 4;
+ if (mode & EMIT_INVALIDATE)
+ cmd += 2;
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 2 * hweight8(aux_inv) + 2;
+
+ cs = intel_ring_begin(rq, cmd);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, rq->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+ /* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = hwsp_offset(rq);
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ *
+ * i915_request_started() is used during preemption processing
+ * to decide if the request is currently inside the user payload
+ * or spinning on a kernel semaphore (or earlier). For no-preemption
+ * requests, we do allow preemption on the semaphore before the user
+ * payload, but do not allow preemption once the request is started.
+ *
+ * i915_request_started() is similarly used during GPU hangs to
+ * determine if the user's payload was guilty, and if so, the
+ * request is banned. Before the request is started, it is assumed
+ * to be unharmed and an innocent victim of another's hang.
+ */
+ *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_CHECK;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular all the gen that do not need the w/a at all!), if we
+ * took care to make sure that on every switch into this context
+ * (both ordinary and for preemption) that arbitration was enabled
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ if (unlikely(i915_request_has_nopreempt(rq)))
+ return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+ rq->wa_tail = intel_ring_offset(rq, cs);
+
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(rq);
+
+ return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the preamble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = gen12_emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ /* XXX Stalling flush before seqno write; post-sync not */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
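The pre-parser note near the end of the file suggests wrapping self-modifying-code writes with the MI_ARB_CHECK pre-parser toggle. A hedged sketch of that usage, built on the preparser_disable() helper defined above (emit_smc_writes() and the write payload are hypothetical):

static u32 *emit_smc_writes(struct i915_request *rq, u32 *cs)
{
	*cs++ = preparser_disable(true);  /* stop prefetch across the writes */
	/* ... MI_STORE_DWORD_IMM writes that patch later instructions ... */
	*cs++ = preparser_disable(false); /* re-enable the pre-parser */
	return cs;
}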
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 000000000000..cc6e21d3662a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ memset(batch, 0, 6 * sizeof(u32));
+
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
+ batch[2] = offset;
+
+ return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+ *cs++ = 0; /* We're thrashing one extra dword. */
+
+ return cs;
+}
+
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
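A minimal usage sketch for the inline emitters above: the asserts require the write target to be 8-byte aligned with bit 5 clear. seqno_address() is a hypothetical helper; the real callers resolve the address via hwsp_offset() in gen8_engine_cs.c.

static u32 *emit_seqno(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno,
				    seqno_address(rq), /* hypothetical */
				    0);
}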
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7..755522ced60d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-static inline unsigned int
+static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
return i915_pde_index(end, shift) - *idx;
}
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
return (start ^ end) & mask && (start & ~mask) == 0;
}
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
{
GEM_BUG_ON(start >= end);
if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
return end - start;
}
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
return (vm->total + (1ull << shift) - 1) >> shift;
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 1d1757584f49..34a645d6babd 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -234,6 +234,7 @@ static void signal_irq_work(struct irq_work *work)
intel_breadcrumbs_disarm_irq(b);
rcu_read_lock();
+ atomic_inc(&b->signaler_active);
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
struct i915_request *rq;
@@ -256,19 +257,20 @@ static void signal_irq_work(struct irq_work *work)
list_del_rcu(&rq->signal_link);
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
+ if (release) {
+ if (intel_timeline_is_last(ce->timeline, rq))
+ add_retire(b, ce->timeline);
+ intel_context_put(ce);
+ }
if (__dma_fence_signal(&rq->fence))
/* We own signal_node now, xfer to local list */
signal = slist_add(&rq->signal_node, signal);
else
i915_request_put(rq);
-
- if (release) {
- add_retire(b, ce->timeline);
- intel_context_put(ce);
- }
}
}
+ atomic_dec(&b->signaler_active);
rcu_read_unlock();
llist_for_each_safe(signal, sn, signal) {
@@ -327,17 +329,19 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- /* Kick the work once more to drain the signalers */
+ if (!READ_ONCE(b->irq_armed))
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
- while (unlikely(READ_ONCE(b->irq_armed))) {
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
- GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
@@ -469,6 +473,39 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
i915_request_put(rq);
}
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b)
+{
+ struct i915_request *rq, *rn;
+ bool release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->signal_lock, flags);
+
+ if (list_empty(&ce->signals))
+ goto unlock;
+
+ list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ continue;
+
+ list_del_rcu(&rq->signal_link);
+ irq_signal_request(rq, b);
+ i915_request_put(rq);
+ }
+ release = remove_signaling_context(b, ce);
+
+unlock:
+ spin_unlock_irqrestore(&ce->signal_lock, flags);
+ if (release)
+ intel_context_put(ce);
+
+ while (atomic_read(&b->signaler_active))
+ cpu_relax();
+}
+
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
struct intel_context *ce;
@@ -481,8 +518,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
index ed3d1deabfbd..3ce5ce270b04 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_BREADCRUMBS__
#define __INTEL_BREADCRUMBS__
+#include <linux/atomic.h>
#include <linux/irq_work.h>
#include "intel_engine_types.h"
@@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+ atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (atomic_dec_and_test(&b->active))
+ __intel_breadcrumbs_park(b);
+}
static inline void
intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
@@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b);
+
#endif /* __INTEL_BREADCRUMBS__ */
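A hedged sketch of the intended pairing (the engine-PM callsites are assumed, not shown in this diff): each unpark takes a reference that keeps the irq_work armed, and the final park drains the remaining signalers.

	intel_breadcrumbs_unpark(engine->breadcrumbs); /* on engine wake */
	/* ... submit and signal requests ... */
	intel_breadcrumbs_park(engine->breadcrumbs);   /* on engine idle */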
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index a74bb3062bd8..3a084ce8ff5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,17 +29,20 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- /* Not all breadcrumbs are attached to physical HW */
- struct intel_engine_cs *irq_engine;
+ atomic_t active;
spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
struct llist_head signaled_requests;
+ atomic_t signaler_active;
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
+
+ /* Not all breadcrumbs are attached to physical HW */
+ struct intel_engine_cs *irq_engine;
};
#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fda2eba81e22..d24ab6fa0ee5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce)
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+ return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce)
static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return READ_ONCE(ce->runtime.total) * period;
}
static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index b9c8163978a3..8dfd8f656aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,7 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
-#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c132746..e10d78601bbd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -30,6 +30,10 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
int (*alloc)(struct intel_context *ce);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
@@ -58,8 +62,12 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+ __intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+ __intel_context_inflight_count(READ_ONCE((ce)->inflight))
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -81,12 +89,13 @@ struct intel_context {
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
-#define CONTEXT_VALID_BIT 2
-#define CONTEXT_CLOSED_BIT 3
-#define CONTEXT_USE_SEMAPHORES 4
-#define CONTEXT_BANNED 5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
-#define CONTEXT_NOPREEMPT 7
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
u32 *lrc_reg_state;
union {
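The widened inflight encoding above packs a small submission count into the low bits of the engine pointer, now three tag bits instead of two. Assuming ptr_mask_bits()/ptr_unmask_bits() from i915_utils.h behave as their names suggest, readers unpack it as:

	struct intel_engine_cs *engine = intel_context_inflight(ce); /* tags cleared */
	unsigned int count = intel_context_inflight_count(ce);       /* low 3 bits */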
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 760fefdfe392..47ee8578e511 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -15,7 +15,6 @@
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
-#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
void intel_engine_init_execlists(struct intel_engine_cs *engine);
-static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- memset(batch, 0, 6 * sizeof(u32));
-
- batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
- batch[1] = flags1;
- batch[2] = offset;
-
- return batch + 6;
-}
-
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, 0, flags, offset);
-}
-
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
-}
-
-static inline u32 *
-__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = value;
- *cs++ = 0; /* We're thrashing one extra dword. */
-
- return cs;
-}
-
-static inline u32*
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- 0,
- flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32*
-gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- flags0,
- flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32 *
-__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- *cs++ = (MI_FLUSH_DW + 1) | flags;
- *cs++ = gtt_offset;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
-}
-
-static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- GEM_BUG_ON(gtt_offset & (1 << 5));
- /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_flush_dw(cs,
- value,
- gtt_offset | MI_FLUSH_DW_USE_GTT,
- flags | MI_FLUSH_DW_OP_STOREDW);
-}
-
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
@@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ __intel_engine_flush_submission(engine, true);
+}
void intel_engines_reset_default_submission(struct intel_gt *gt);
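
Splitting the helper lets a caller kick the submission tasklet without spinning for a run already in flight on another CPU; only the sync=true path performs the final tasklet_unlock_wait(). A hedged sketch of the asynchronous calling pattern (the wrapper name is hypothetical, not part of the patch):

/* Hypothetical wrapper: schedule/run the tasklet but do not wait
 * for a concurrent run on another CPU to finish.
 */
static inline void example_flush_submission_async(struct intel_engine_cs *engine)
{
	__intel_engine_flush_submission(engine, false);
}
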
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0b31670343f5..fb1b1d096975 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -33,12 +33,14 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
-#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -340,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->schedule = NULL;
ewma__engine_latency_init(&engine->latency);
- seqlock_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
+ INIT_LIST_HEAD(&engine->status_page.timelines);
+
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
@@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
return 0;
err_status:
@@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine,
return ce;
}
+static void destroy_pinned_context(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *hwsp = engine->status_page.vma;
+
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+ mutex_lock(&hwsp->vm->mutex);
+ list_del(&ce->timeline->engine_link);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+}
+
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ setup = intel_guc_submission_setup;
+ else if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
@@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
GEM_BUG_ON(!list_empty(&engine->active.requests));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
- cleanup_status_page(engine);
intel_breadcrumbs_free(engine->breadcrumbs);
intel_engine_fini_retire(engine);
@@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
fput(engine->default_state);
- if (engine->kernel_context) {
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- }
+ if (engine->kernel_context)
+ destroy_pinned_context(engine->kernel_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+ cleanup_status_page(engine);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.stop_timeout_ms);
}
-int intel_engine_stop_cs(struct intel_engine_cs *engine)
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+
+ /* A final mmio read to hopefully flush GPU writes to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ int err = 0;
+
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+ ENGINE_TRACE(engine,
+ "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+ ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
-
- err = 0;
- if (__intel_wait_for_register_fw(uncore,
- mode, MODE_IDLE, MODE_IDLE,
- 1000, stop_timeout(engine),
- NULL)) {
- ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
- err = -ETIMEDOUT;
+ /*
+ * Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+ err = -ETIMEDOUT;
}
- /* A final mmio read to let GPU writes be hopefully flushed to memory */
- intel_uncore_posting_read_fw(uncore, mode);
-
return err;
}
@@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
if (!t->func)
return;
- /* Synchronise and wait for the tasklet on another CPU */
- tasklet_kill(t);
-
- /* Having cancelled the tasklet, ensure that is run */
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
}
/**
@@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt, id)
+ for_each_engine(engine, gt, id) {
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
engine->set_default_submission(engine);
+ }
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
@@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(const struct i915_sched_attr *attr,
- char *buf, int x, int len)
-{
- if (attr->priority == I915_PRIORITY_INVALID)
- return x;
-
- x += snprintf(buf + x, len - x,
- " prio=%d", attr->priority);
-
- return x;
-}
-
-static void print_request(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix)
-{
- const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
- char buf[80] = "";
- int x = 0;
-
- x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
-
- drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
- prefix,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags) ? "+" :
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "-" :
- "",
- buf,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
-}
-
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
@@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv)) {
+ if (intel_engine_in_guc_submission_mode(engine)) {
+ /* nothing to print yet */
+ } else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
for (port = execlists->pending; (rq = *port); port++) {
char hdr[160];
@@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1667,7 +1676,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
- engine->fw_domain, atomic_read(&engine->fw_active));
+ engine->fw_domain, READ_ONCE(engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
- print_request(m, rq, "\t\tactive ");
+ i915_request_show(m, rq, "\t\tactive ", 0);
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
@@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- intel_execlists_show_requests(engine, m, print_request, 8);
+ intel_execlists_show_requests(engine, m, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
@@ -1745,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
* add it to the total.
*/
*now = ktime_get();
- if (atomic_read(&engine->stats.active))
+ if (READ_ONCE(engine->stats.active))
total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
@@ -1764,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
ktime_t total;
do {
- seq = read_seqbegin(&engine->stats.lock);
+ seq = read_seqcount_begin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqretry(&engine->stats.lock, seq));
+ } while (read_seqcount_retry(&engine->stats.lock, seq));
return total;
}
@@ -1802,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
struct intel_timeline *tl = request->context->timeline;
list_for_each_entry_from_reverse(request, &tl->requests, link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
break;
active = request;
@@ -1813,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
return active;
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
continue;
- if (!i915_request_started(request))
+ if (!__i915_request_has_started(request))
continue;
/* More than one preemptible request may match! */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9060385cd69e..d7be2b9339f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
return true;
}
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+ struct i915_request *rq;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, gfp);
+ intel_context_exit(ce);
+
+ return rq;
+}
+
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
{
engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
@@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
engine->heartbeat.systole = i915_request_get(rq);
}
+static void heartbeat_commit(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ idle_pulse(rq->engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, attr);
+}
+
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
@@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
goto unlock;
- idle_pulse(engine, rq);
-
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
unlock:
mutex_unlock(&ce->timeline->mutex);
@@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_engine_has_preemption(engine));
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
return PTR_ERR(rq);
__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
- idle_pulse(engine, rq);
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
return 0;
@@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (llist_empty(&engine->barrier_tasks))
return 0;
@@ -282,15 +298,22 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- rq = i915_request_create(engine->kernel_context);
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
+ goto out_rpm;
+ }
+
+ rq = heartbeat_create(ce, GFP_KERNEL);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_rpm;
+ goto out_unlock;
}
- idle_pulse(engine, rq);
- i915_request_add(rq);
+ heartbeat_commit(rq, &attr);
+ err = 0;
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
out_rpm:
intel_engine_pm_put(engine);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 499b09cb4acf..e67d09259dd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -60,18 +60,26 @@ static int __engine_unpark(struct intel_wakeref *wf)
/* Scrub the context image after our loss of control */
ce->ops->reset(ce);
+
+ CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+ ce->timeline->seqno,
+ READ_ONCE(*ce->timeline->hwsp_seqno),
+ ce->ring->emit);
+ GEM_BUG_ON(ce->timeline->seqno !=
+ READ_ONCE(*ce->timeline->hwsp_seqno));
}
if (engine->unpark)
engine->unpark(engine);
+ intel_breadcrumbs_unpark(engine->breadcrumbs);
intel_engine_unpark_heartbeat(engine);
return 0;
}
#if IS_ENABLED(CONFIG_LOCKDEP)
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
unsigned long flags;
@@ -81,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
return flags;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
@@ -90,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#else
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
return 0;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
}
@@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 000000000000..24fbdd94351a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (engine->stats.active) {
+ engine->stats.active++;
+ return;
+ }
+
+ /* The writer is serialised, but the PMU reader may be in hardirq context */
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.start = ktime_get();
+ engine->stats.active++;
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+
+ GEM_BUG_ON(!engine->stats.active);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(!engine->stats.active);
+ if (engine->stats.active > 1) {
+ engine->stats.active--;
+ return;
+ }
+
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.active--;
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
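
Together with the seqcount_init() change in intel_engine_cs.c above, this replaces the stats seqlock_t with a bare seqcount_t: the writers in this header are already serialised by the submission path (hence the explicit local_irq_save() against the hardirq PMU reader), so only the reader retry loop is needed. A generic sketch of the seqcount pattern in play, illustrative rather than part of the patch:

#include <linux/seqlock.h>
#include <linux/types.h>

static seqcount_t demo_seq;	/* seqcount_init(&demo_seq) during setup */
static u64 demo_busy_ns;

static void demo_account(u64 delta)	/* writers externally serialised */
{
	write_seqcount_begin(&demo_seq);
	demo_busy_ns += delta;
	write_seqcount_end(&demo_seq);
}

static u64 demo_read(void)
{
	unsigned int seq;
	u64 val;

	do {	/* lockless reader: retry if a write raced with us */
		seq = read_seqcount_begin(&demo_seq);
		val = demo_busy_ns;
	} while (read_seqcount_retry(&demo_seq, seq));

	return val;
}
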
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ee6312601c56..d2346b425547 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
+ struct list_head timelines;
struct i915_vma *vma;
u32 *addr;
};
@@ -183,7 +184,8 @@ struct intel_engine_execlists {
* Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
-#define ERROR_CSB BIT(31)
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -237,16 +239,6 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @switch_priority_hint: Second context priority.
- *
- * We submit multiple contexts to the HW simultaneously and would
- * like to occasionally switch between them to emulate timeslicing.
- * To know when timeslicing is suitable, we track the priority of
- * the context submitted second.
- */
- int switch_priority_hint;
-
- /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -327,7 +319,7 @@ struct intel_engine_cs {
* as possible.
*/
enum forcewake_domains fw_domain;
- atomic_t fw_active;
+ unsigned int fw_active;
unsigned long context_tag;
@@ -524,12 +516,12 @@ struct intel_engine_cs {
/**
* @active: Number of contexts currently scheduled in.
*/
- atomic_t active;
+ unsigned int active;
/**
* @lock: Lock protecting the below fields.
*/
- seqlock_t lock;
+ seqcount_t lock;
/**
* @total: Total time this engine was busy.
@@ -559,6 +551,8 @@ struct intel_engine_cs {
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
} props, defaults;
+
+ I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 000000000000..ac1be7a632d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,3896 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences from the legacy HW contexts is that logical
+ * ring contexts incorporate many more things into the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there mean you don't need to do a ppgtt->switch_mm yourself,
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need a set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
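
As a reading aid for the pairing rule described above, a small self-contained model (not driver code): requests that share a context are coalesced at the head of the queue, so the two ELSP ports always carry distinct context IDs.

#include <stddef.h>

struct demo_req {
	unsigned int ctx_id;
};

/* Fill the two submission ports from a flat queue, merging leading
 * requests that belong to the same context (only the latest tail of
 * a merged run needs to be submitted).
 */
static unsigned int
demo_fill_elsp(const struct demo_req *queue, unsigned int count,
	       const struct demo_req *ports[2])
{
	unsigned int n = 0;

	ports[0] = ports[1] = NULL;
	while (n < count) {
		if (!ports[0] || queue[n].ctx_id == ports[0]->ctx_id) {
			ports[0] = &queue[n++];	/* merge same context */
		} else {
			ports[1] = &queue[n++];	/* second, distinct context */
			break;
		}
	}

	return n;	/* requests consumed into this execution list */
}
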
+#include <linux/interrupt.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+ struct rcu_work rcu;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical engine to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep a rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
+
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+ struct i915_request *rq,
+ int error)
+{
+ struct i915_request *active = rq;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ if (error) {
+ i915_request_set_error_once(rq, error);
+ __i915_request_skip(rq);
+ }
+ active = rq;
+ }
+
+ return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
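
For context, the pause dword written here is consumed on the GPU side by a semaphore poll emitted with the fini breadcrumb. A hedged sketch of that busywait, modelled on the driver's gen8-era preempt busywait (flag names from intel_gpu_commands.h; the exact emission varies by gen):

static u32 *example_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	/* Stall while HWS_PREEMPT reads back non-zero: SAD_EQ_SDD with
	 * semaphore data 0 lets the ring proceed only when *addr == 0.
	 */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;	/* semaphore data */
	*cs++ = intel_hws_preempt_address(rq->engine);
	*cs++ = 0;

	return cs;
}
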
+
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+ struct i915_priolist *p;
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&execlists->queue);
+ if (!rb)
+ return INT_MIN;
+
+ /*
+ * As the priolist[] are inverted, with the highest priority in [0],
+ * we have to flip the index value to become priority.
+ */
+ p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
+ return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ int last_prio;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+ * less than the current tail of ELSP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+ * The highest priority request in the queue cannot be either
+ * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+ * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(&engine->execlists)) > last_prio;
+}
+
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (i915_request_is_active(prev))
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->active.requests,
+ sched.link) {
+ if (__i915_request_is_complete(rq)) {
+ list_del_init(&rq->sched.link);
+ continue;
+ }
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Check in case we rollback so far we wrap [size/2] */
+ if (intel_ring_direction(rq->ring,
+ rq->tail,
+ rq->ring->tail + 8) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+ active = rq;
+ }
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+ /*
+ * This is currently only used when GVT-g is enabled. When GVT-g is
+ * disabled, the compiler should eliminate this function as dead code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (__i915_request_is_complete(rq))
+ head = rq->tail;
+ else
+ head = __active_request(ce->timeline, rq, -EIO)->head;
+ head = intel_ring_wrap(ce->ring, head);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ lrc_init_regs(ce, engine, true);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_closed(ce) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_banned(ce);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "before");
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = __ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag >= BITS_PER_LONG);
+ __clear_bit(tag, &engine->context_tag);
+ ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ ce->lrc.ccid |= engine->execlists.ccid;
+
+ __intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !engine->fw_active++)
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+ return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = ce->inflight;
+ if (!old)
+ old = __execlists_schedule_in(rq);
+ WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ spin_lock_irq(&engine->active.lock);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ WRITE_ONCE(rq->engine, &ve->base);
+ ve->base.submit_request(rq);
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * After this point, the rq may be transferred to a new sibling, so
+ * before we clear ce->inflight make sure that the context has been
+ * removed from the b->signalers and furthermore we need to make sure
+ * that the concurrent iterator in signal_irq_work is no longer
+ * following ce->signal_link.
+ */
+ if (!list_empty(&ce->signals))
+ intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+ /*
+ * This engine is now too busy to run this virtual request, so
+ * see if we can find an alternative engine for it to execute on.
+ * Once a request has become bonded to this engine, we treat it the
+ * same as any other native request.
+ */
+ if (i915_request_in_priority_queue(rq) &&
+ rq->execution_mask != engine->mask)
+ resubmit_virtual_request(rq, ve);
+
+ if (READ_ONCE(ve->request))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+ struct intel_context * const ce)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ unsigned int ccid;
+
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
+ CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+ GEM_BUG_ON(ce->inflight != engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "after");
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (intel_timeline_is_last(ce->timeline, rq) &&
+ __i915_request_is_complete(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ ccid = ce->lrc.ccid;
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ __set_bit(ccid - 1, &engine->context_tag);
+ }
+
+ lrc_update_runtime(ce);
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !--engine->fw_active)
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ WRITE_ONCE(ce->inflight, NULL);
+ intel_context_put(ce);
+}
+
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+
+ trace_i915_request_out(rq);
+
+ GEM_BUG_ON(!ce->inflight);
+ ce->inflight = ptr_dec(ce->inflight);
+ if (!__intel_context_inflight_count(ce->inflight))
+ __execlists_schedule_out(rq, ce);
+
+ i915_request_put(rq);
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ u64 desc = ce->lrc.desc;
+ u32 tail, prev;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
+ */
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access, but the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete leading to invalid PD chasing.
+ */
+ wmb();
+
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
+}
+
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+ bool sentinel = false;
+ u32 ccid = -1;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
+ /*
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ sentinel = i915_request_has_sentinel(rq);
+
+ /*
+ * We want virtual requests to only be in the first slot so
+ * that they are never stuck behind a hog and can be immediately
+ * transferred onto the next idle engine.
+ */
+ if (rq->execution_mask != engine->mask &&
+ port != execlists->pending) {
+ GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (__i915_request_is_complete(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned int n;
+
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries; keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq = execlists->pending[n];
+
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
+ }
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (ctx_single_port_submission(prev))
+ return false;
+
+ return true;
+}
+
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(prev == next);
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (__i915_request_is_complete(next))
+ return true;
+
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
+
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
+
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+ return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!rq)
+ return false;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ inflight = intel_context_inflight(&ve->context);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily cleanup after another engine handled rq */
+ if (!rq || !virtual_matches(ve, rq, engine)) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ return ve;
+ }
+
+ return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ lrc_update_offsets(&ve->context, engine);
+
+ /*
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do not know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ /* If not currently active, or about to switch, wait for next event */
+ if (!rq || __i915_request_is_complete(rq))
+ return false;
+
+ /* We do not need to start the timeslice until after the ACK */
+ if (READ_ONCE(engine->execlists.pending[0]))
+ return false;
+
+ /* If ELSP[1] is occupied, always check to see if worth slicing */
+ if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+ ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ return true;
+ }
+
+ /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for queue\n");
+ return true;
+ }
+
+ if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ const struct intel_engine_execlists *el = &engine->execlists;
+
+ if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+ return false;
+
+ if (!needs_timeslice(engine, rq))
+ return false;
+
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ unsigned long duration;
+
+ /* Disable the timer if there is nothing to switch to */
+ duration = 0;
+ if (needs_timeslice(engine, *el->active)) {
+ /* Avoid continually prolonging an active timeslice */
+ if (timer_active(&el->timer)) {
+ /*
+ * If we just submitted a new ELSP after an old
+ * context, that context may have already consumed
+ * its timeslice, so recheck.
+ */
+ if (!timer_pending(&el->timer))
+ tasklet_hi_schedule(&el->tasklet);
+ return;
+ }
+
+ duration = timeslice(engine);
+ }
+
+ set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+}
+
+static bool completed(const struct i915_request *rq)
+{
+ if (i915_request_has_sentinel(rq))
+ return false;
+
+ return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last, * const *active;
+ struct virtual_engine *ve;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+ * Our goal then is to point each port to the end of a consecutive
+ * sequence of requests as being the most optimal (fewest wake ups
+ * and context switches) submission.
+ */
+
+ spin_lock(&engine->active.lock);
+
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ *
+ */
+ active = execlists->active;
+ while ((last = *active) && completed(last))
+ active++;
+
+ if (last) {
+ if (need_preempt(engine, last)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
+
+ last = NULL;
+ } else if (timeslice_expired(engine, last)) {
+ ENGINE_TRACE(engine,
+ "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+ yesno(timer_expired(&execlists->timer)),
+ last->fence.context, last->fence.seqno,
+ rq_prio(last),
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
+
+ /*
+ * Consume this timeslice; ensure we start a new one.
+ *
+ * The timeslice expired, and we will unwind the
+ * running contexts and recompute the next ELSP.
+ * If that submit will be the same pair of contexts
+ * (due to dependency ordering), we will skip the
+ * submission. If we don't cancel the timer now,
+ * we will see that the timer has expired and
+ * reschedule the tasklet; continually until the
+ * next context switch or other preemption event.
+ *
+ * Since we have decided to reschedule based on
+ * consumption of this timeslice, if we submit the
+ * same context again, grant it a full timeslice.
+ */
+ cancel_timer(&execlists->timer);
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (active[1]) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ spin_unlock(&engine->active.lock);
+ return;
+ }
+ }
+ }
+
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.active.lock);
+
+ rq = ve->request;
+ if (unlikely(!virtual_matches(ve, rq, engine)))
+ goto unlock; /* lost the race to a sibling */
+
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->context != &ve->context);
+
+ if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ spin_unlock(&engine->active.lock);
+ return; /* leave this for another sibling */
+ }
+
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
+
+ if (__i915_request_submit(rq)) {
+ /*
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
+ */
+ virtual_xfer_context(ve, engine);
+ GEM_BUG_ON(ve->siblings[0] != engine);
+
+ submit = true;
+ last = rq;
+ }
+
+ i915_request_put(rq);
+unlock:
+ spin_unlock(&ve->base.active.lock);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
+ }
+
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ bool merge = true;
+
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
+ */
+ if (last && !can_merge_rq(last, rq)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port == last_port)
+ goto done;
+
+ /*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
+ goto done;
+
+ /*
+ * We avoid submitting virtual requests into
+ * the secondary ports so that we can migrate
+ * the request immediately to another engine
+ * rather than wait for the primary request.
+ */
+ if (rq->execution_mask != engine->mask)
+ goto done;
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
+ goto done;
+
+ merge = false;
+ }
+
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port++ = i915_request_get(last);
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
+
+ submit = true;
+ last = rq;
+ }
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+done:
+ *port++ = i915_request_get(last);
+
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose the priority hint such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We derive the priority hint then as the first "hole" in
+ * the HW submission ports and if there are no available slots,
+ * the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the priority hint is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ execlists->queue_priority_hint = queue_prio(execlists);
+ spin_unlock(&engine->active.lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+ * of requests as currently running, e.g. trying to timeslice a pair
+ * of ordered contexts.
+ */
+ if (submit &&
+ memcmp(active,
+ execlists->pending,
+ (port - execlists->pending) * sizeof(*port))) {
+ *port = NULL;
+ while (port-- != execlists->pending)
+ execlists_schedule_in(*port, port - execlists->pending);
+
+ WRITE_ONCE(execlists->yield, -1);
+ set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
+ while (port-- != execlists->pending)
+ i915_request_put(*port);
+ *execlists->pending = NULL;
+ }
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+ local_irq_disable(); /* Suspend interrupts across request submission */
+ execlists_dequeue(engine);
+ local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+ struct i915_request **inactive)
+{
+ struct i915_request * const *port;
+
+ for (port = execlists->pending; *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* Having cancelled all outstanding process_csb(), stop their timers */
+ GEM_BUG_ON(execlists->pending[0]);
+ cancel_timer(&execlists->timer);
+ cancel_timer(&execlists->preempt);
+
+ return inactive;
+}
+
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static bool gen12_csb_parse(const u64 csb)
+{
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+ bool new_queue =
+ lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+ return false;
+}
+
+static bool gen8_csb_parse(const u64 csb)
+{
+ return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry;
+
+ /*
+ * Reading from the HWSP has one particular advantage: we can detect
+ * a stale entry. Since the write into HWSP is broken, we have no reason
+ * to trust the HW at all, the mmio entry may equally be unordered, so
+ * we prefer the path that is self-checking and as a last resort,
+ * return the mmio value.
+ *
+ * tgl,dg1:HSDES#22011327657
+ */
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+ int idx = csb - engine->execlists.csb_status;
+ int status;
+
+ status = GEN8_EXECLISTS_STATUS_BUF;
+ if (idx >= 6) {
+ status = GEN11_EXECLISTS_STATUS_BUF2;
+ idx -= 6;
+ }
+ status += sizeof(u64) * idx;
+
+ entry = intel_uncore_read64(engine->uncore,
+ _MMIO(engine->mmio_base + status));
+ }
+ preempt_enable();
+
+ return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry = READ_ONCE(*csb);
+
+ /*
+ * Unfortunately, the GPU does not always serialise its write
+ * of the CSB entries before its write of the CSB pointer, at least
+ * from the perspective of the CPU, using what is known as a Global
+ * Observation Point. We may read a new CSB tail pointer, but then
+ * read the stale CSB entries, causing us to misinterpret the
+ * context-switch events, and eventually declare the GPU hung.
+ *
+ * icl:HSDES#1806554093
+ * tgl:HSDES#22011248461
+ */
+ if (unlikely(entry == -1))
+ entry = wa_csb_read(engine, csb);
+
+ /* Consume this entry so that we can spot its future reuse. */
+ WRITE_ONCE(*csb, -1);
+
+ /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+ return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+ /* By cancelling, we will start afresh in start_timeslice() */
+ cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
+ struct i915_request **prev;
+ u8 head, tail;
+
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+
+ /*
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4 bits. As it happens we know the next 4 bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
+ */
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ if (unlikely(head == tail))
+ return inactive;
+
+ /*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart; the
+ * simplest way is to stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
+ */
+ rmb();
+
+ /* Remember who was last running under the timer */
+ prev = inactive;
+ *prev = NULL;
+
+ do {
+ bool promote;
+ u64 csb;
+
+ if (++head == num_entries)
+ head = 0;
+
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
+ csb = csb_read(engine, buf + head);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, upper_32_bits(csb), lower_32_bits(csb));
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ promote = gen12_csb_parse(csb);
+ else
+ promote = gen8_csb_parse(csb);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ ring_set_paused(engine, 0);
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ *inactive++ = *old++;
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt is processed. One might assume
+ * that, since the breadcrumb write precedes both
+ * the user interrupt and the CS event for the
+ * context switch, it would therefore be visible
+ * before the CS event itself...
+ */
+ if (GEM_SHOW_DEBUG() &&
+ !__i915_request_is_complete(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+ }
+
+ *inactive++ = *execlists->active++;
+
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ }
+ } while (head != tail);
+
+ /*
+ * Gen11 has proven to fail wrt the global observation point
+ * between entry and tail update, failing on the ordering and
+ * thus we see an old entry in the context status buffer.
+ *
+ * Forcibly evict the entries before the next gpu csb update, to
+ * increase the odds that we get fresh entries even with
+ * non-working hardware. The cost of doing so mostly comes out
+ * in the wash, as hardware, working or not, will need to do the
+ * invalidation beforehand.
+ */
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+ /*
+ * We assume that any event reflects a change in context flow
+ * and merits a fresh timeslice. We reinstall the timer after
+ * inspecting the queue to see if we need to resubmit.
+ */
+ if (*prev != *execlists->active) /* elide lite-restores */
+ new_timeslice(execlists);
+
+ return inactive;
+}
+
+static void post_process_csb(struct i915_request **port,
+ struct i915_request **last)
+{
+ while (port != last)
+ execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (__i915_request_is_complete(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ if (i915_request_on_hold(rq))
+ return false;
+
+ spin_lock_irq(&engine->active.lock);
+
+ if (__i915_request_is_complete(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ cap->error->gt->engine->hung = true;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at active:%zd\n",
+ ccid, port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at pending:%zd\n",
+ ccid, port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return;
+
+ spin_lock_irq(&engine->active.lock);
+ cap->rq = active_context(engine, active_ccid(engine));
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed.
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = process_csb(engine, post);
+ GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+ if (unlikely(preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ }
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt to the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+ msg = "preemption time out";
+ else
+ msg = "internal error";
+
+ engine->execlists.error_interrupt = 0;
+ execlists_reset(engine, msg);
+ }
+
+ if (!engine->execlists.pending[0]) {
+ execlists_dequeue_irq(engine);
+ start_timeslice(engine);
+ }
+
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static bool submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return false;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
+
+ if (submit_queue(engine, request))
+ __execlists_kick(&engine->execlists);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = execlists_context_alloc,
+
+ .pre_pin = execlists_context_pre_pin,
+ .pin = execlists_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(execlists_active(&engine->execlists));
+
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ drm_err(&engine->i915->drm,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error, parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ if (INTEL_GEN(engine->i915) >= 11)
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ else
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ bool unexpected = false;
+
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
+
+ return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->resume() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /*
+ * We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Also as modern gpu as kbl can suffer
+ * from system hang if batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ring_set_paused(engine, 1);
+ intel_engine_stop_cs(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
+
+ inactive = process_csb(engine, inactive); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(engine);
+
+ return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 head;
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ rq = active_context(engine, engine->execlists.reset_ccid);
+ if (!rq)
+ return;
+
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ if (__i915_request_is_complete(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ head = intel_ring_wrap(ce->ring, rq->tail);
+ goto out_replay;
+ }
+
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+ rq = active_request(ce->timeline, rq);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
+
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!__i915_request_has_started(rq))
+ goto out_replay;
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ __i915_request_reset(rq, stalled);
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+out_replay:
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ head, ce->ring->tail);
+ lrc_reset_regs(ce, engine);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = reset_csb(engine, post);
+
+ execlists_reset_active(engine, true);
+
+ inactive = cancel_port_requests(execlists, inactive);
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /* Process the csb, find the guilty context and throw away */
+ execlists_reset_csb(engine, stalled);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+ /* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ execlists_reset_csb(engine, true);
+
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ i915_request_mark_eio(rq);
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ i915_request_mark_eio(rq);
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ i915_request_mark_eio(rq);
+
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.active.lock);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ i915_request_mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ }
+ spin_unlock(&ve->base.active.lock);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /*
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
+ *
+ * If the GPU reset fails, the engine may still be alive with requests
+ * inflight. We expect those to complete, or for the device to be
+ * reset as the next level of recovery, and as a final resort we
+ * will declare the device wedged.
+ */
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&execlists->tasklet))
+ __execlists_kick(execlists);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
+static void execlists_park(struct intel_engine_cs *engine)
+{
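+	/* The engine is about to idle; stop the timeslice and preemption timers */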
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = execlists_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ execlists_shutdown(engine);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+	/* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = execlists_resume;
+
+ engine->cops = &execlists_context_ops;
+ engine->request_alloc = execlists_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = execlists_set_default_submission;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+		/*
+		 * TODO: On Gen11 interrupt masks need to be clear
+		 * to allow C6 entry. Keep interrupts enabled and
+		 * take the hit of generating extra interrupts
+		 * until a more refined solution exists.
+		 */
+ }
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+ unsigned int shift = 0;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
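+	/*
+	 * The tasklet drives the submission state machine; the timers fire
+	 * when a timeslice expires and when a preemption attempt times out.
+	 */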
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+ } else {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
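+	/*
+	 * Both the CSB entries and the HW's write pointer live in the
+	 * engine's status page; cache pointers to them for the tasklet.
+	 */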
+ execlists->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+ if (INTEL_GEN(i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
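+	/*
+	 * context_tag is a bitmap of tags available for labelling HW
+	 * contexts (CCID); on gen11+ the engine class and instance are
+	 * also folded into the upper dword of the CCID.
+	 */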
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
+ engine->release = execlists_release;
+
+ return 0;
+}
+
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+ struct virtual_engine *ve =
+ container_of(wrk, typeof(*ve), rcu.work);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->context.inflight);
+
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!__i915_request_is_complete(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees, as a tasklet running in parallel may reinsert the rb_node
+	 * into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->active.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->active.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ lrc_fini(&ve->context);
+ intel_context_fini(&ve->context);
+
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
+ intel_engine_free_request_pool(&ve->base);
+
+ kfree(ve->bonds);
+ kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+	 * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ /* Note: we must use a real engine class for setting up reg state */
+ return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ intel_timeline_exit(ce->timeline);
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = virtual_context_alloc,
+
+ .pre_pin = virtual_context_pre_pin,
+ .pin = virtual_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_set_error_once(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
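+	/*
+	 * Register this virtual engine in each sibling's rbtree of waiting
+	 * virtual engines, ordered by priority, and kick the sibling's
+	 * tasklet whenever we become its highest-priority candidate.
+	 */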
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
+ spin_lock_irq(&sibling->active.lock);
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ }
+
+ goto unlock_engine;
+ }
+
+ if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
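+		/* Walk the rbtree by priority, tracking whether we land leftmost */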
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+
+unlock_engine:
+ spin_unlock_irq(&sibling->active.lock);
+
+ if (intel_context_inflight(&ve->context))
+ break;
+ }
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ unsigned long flags;
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ /* By the time we resubmit a request, it may be completed */
+ if (__i915_request_is_complete(rq)) {
+ __i915_request_submit(rq);
+ goto unlock;
+ }
+
+ if (ve->request) { /* background completion from preempt-to-busy */
+ GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+ __i915_request_submit(ve->request);
+ i915_request_put(ve->request);
+ }
+
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
+ struct ve_bond *bond;
+
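+	/* Never run the bonded request on the master's own engine */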
+ allowed = ~to_request(signal)->engine->mask;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond)
+ allowed &= bond->sibling_mask;
+
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
+}
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
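+	/* No balancing possible with a single sibling; use a plain context */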
+ if (count == 1)
+ return intel_context_create(siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+	 * this during HW submission of the request, and we need this
+	 * state to be applied globally to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, &ve->base);
+
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+		 * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
+ }
+
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ virtual_engine_initial_hint(ve);
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
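+	/* Copy any bonding restrictions so the clone schedules identically */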
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
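+	/* First bond for this master; grow the bond array by one entry */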
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ if (execlists->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ execlists_set_default_submission;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 000000000000..a8fd7adefd82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index db8c66dde655..700588bc9d57 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+ if (!intel_vtd_active())
+ return false;
+
+ if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+ return true;
+
+ if (IS_GEN(i915, 12))
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
@@ -1073,7 +1082,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->do_idle_maps = needs_idle_maps(i915);
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
ggtt->vm.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a..a357bb431815 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
fence_write(fence);
}
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+ return fence->vma && i915_vma_is_active(fence->vma);
+}
+
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct i915_fence_reg *fence;
+ struct i915_fence_reg *active = NULL;
+ struct i915_fence_reg *fence, *fn;
- list_for_each_entry(fence, &ggtt->fence_list, link) {
+ list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+ if (fence == active) /* now seen this fence twice */
+ active = ERR_PTR(-EAGAIN);
+
+ /* Prefer idle fences so we do not have to wait on the GPU */
+ if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+ if (!active)
+ active = fence;
+
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ continue;
+ }
+
if (atomic_read(&fence->pin_count))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 44f1d51e5ae5..d8e1ab412634 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
int intel_gt_init_mmio(struct intel_gt *gt)
{
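+	/*
+	 * Read the GT clock frequency early so that later init stages can
+	 * convert between clock ticks and nanoseconds.
+	 */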
+ intel_gt_init_clock_frequency(gt);
+
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
@@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- intel_gt_init_clock_frequency(gt);
-
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 104cb30e8c13..06d84cf09570 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -145,7 +145,8 @@ static void pool_retire(struct i915_active *ref)
}
static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+ enum i915_map_type type)
{
struct intel_gt *gt = to_gt(pool);
struct intel_gt_buffer_pool_node *node;
@@ -169,12 +170,14 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
i915_gem_object_set_readonly(obj);
+ node->type = type;
node->obj = obj;
return node;
}
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
@@ -191,6 +194,9 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
if (node->obj->base.size < size)
continue;
+ if (node->type != type)
+ continue;
+
age = READ_ONCE(node->age);
if (!age)
continue;
@@ -205,7 +211,7 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
rcu_read_unlock();
if (&node->link == list) {
- node = node_create(pool, size);
+ node = node_create(pool, size, type);
if (IS_ERR(node))
return node;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 42cbac003e8a..6068f8f1762e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -15,7 +15,8 @@ struct intel_gt;
struct i915_request;
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type);
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index bcf1658c9633..d8d82c890da8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -11,10 +11,9 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"
-struct drm_i915_gem_object;
-
struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
struct rcu_head rcu;
};
unsigned long age;
+ enum i915_map_type type;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 999079686846..a4242ca8dcd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,34 +7,146 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
-#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
-#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
-#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
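+	/*
+	 * TIMESTAMP_OVERRIDE encodes the frequency as an integer divider
+	 * plus a fractional part: f = (divider + 1) MHz + 1 MHz / (denom + 1).
+	 */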
+ u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-static u32 read_clock_frequency(const struct intel_gt *gt)
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
{
- if (INTEL_GEN(gt->i915) >= 11) {
- u32 config;
-
- config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
- config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (config) {
- case 0: return MHZ_12;
- case 1:
- case 2: return MHZ_19_2;
- default:
- case 3: return MHZ_12_5;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+
+ if (INTEL_GEN(uncore->i915) <= 4) {
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
+ */
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+ } else if (INTEL_GEN(uncore->i915) <= 8) {
+ /*
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+		 * rolling over every 1.5 hours)."
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(uncore->i915) <= 9) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- } else if (INTEL_GEN(gt->i915) >= 9) {
- if (IS_GEN9_LP(gt->i915))
- return MHZ_19_2;
- else
- return MHZ_12;
- } else {
- return MHZ_12_5;
+
+ return freq;
+ } else if (INTEL_GEN(uncore->i915) <= 12) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ if (INTEL_GEN(uncore->i915) <= 10)
+ freq = gen10_get_crystal_clock_freq(uncore, c0);
+ else
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
}
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
@@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*/
- gt->clock_frequency = read_clock_frequency(gt);
+ gt->clock_frequency = read_clock_frequency(gt->uncore);
+ if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
GT_TRACE(gt,
- "Using clock frequency: %dkHz\n",
- gt->clock_frequency / 1000);
+ "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+ gt->clock_frequency / 1000,
+ gt->clock_period_ns,
+ div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+ USEC_PER_SEC));
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
- if (gt->clock_frequency != read_clock_frequency(gt)) {
+ if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
dev_err(gt->i915->drm.dev,
"GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
- read_clock_frequency(gt));
+ read_clock_frequency(gt->uncore));
}
}
#endif
@@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den)
return div_u64(nom + den - 1, den);
}
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
- gt->clock_frequency);
+ return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
- 1000 * 1000 * 1000);
+ return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
- u32 val;
+ u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
@@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
- val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (IS_GEN(gt->i915, 6))
- val = roundup(val, 25);
+ val = div_u64_roundup(val, 25) * 25;
return val;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
index f793c89f2cbd..8b03e97a85df 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt);
static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
#endif
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 257063a57101..9830342aa6f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 274aa0dd7050..c94e8ac884eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend)
intel_gt_pm_put(gt);
}
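+/*
+ * Track how long the GT has been awake: writers are serialised by the
+ * wakeref mutex, while the seqcount lets readers sample the stats
+ * without blocking.
+ */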
+static void runtime_begin(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.start = ktime_get();
+ gt->stats.active = true;
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.active = false;
+ gt->stats.total =
+ ktime_add(gt->stats.total,
+ ktime_sub(ktime_get(), gt->stats.start));
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
intel_gt_unpark_requests(gt);
+ runtime_begin(gt);
return 0;
}
@@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
+ runtime_end(gt);
intel_gt_park_requests(gt);
i915_vma_parked(gt);
@@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
void intel_gt_pm_init(struct intel_gt *gt)
@@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
return intel_uc_runtime_resume(&gt->uc);
}
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ ktime_t total = gt->stats.total;
+
+ if (gt->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), gt->stats.start));
+
+ return total;
+}
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ unsigned int seq;
+ ktime_t total;
+
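+	/* Seqcount read loop: retry if a writer updated the stats meanwhile */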
+ do {
+ seq = read_seqcount_begin(&gt->stats.lock);
+ total = __intel_gt_get_awake_time(gt);
+ } while (read_seqcount_retry(&gt->stats.lock, seq));
+
+ return total;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 60f0e2fbe55c..63846a856e7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
static inline bool is_mock_gt(const struct intel_gt *gt)
{
return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 66fcbf9d0fdd..dc06c78c9eeb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- bool interruptible;
LIST_HEAD(free);
- interruptible = true;
- if (unlikely(timeout < 0))
- timeout = -timeout, interruptible = false;
-
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
@@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
- interruptible,
+ true,
timeout);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 6d39a4a11bf3..a83d3e18254d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -75,6 +75,7 @@ struct intel_gt {
intel_wakeref_t awake;
u32 clock_frequency;
+ u32 clock_period_ns;
struct intel_llc llc;
struct intel_rc6 rc6;
@@ -87,6 +88,30 @@ struct intel_gt {
u32 pm_guc_events;
+ struct {
+ bool active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_mutex_t lock;
+
+ /**
+		 * @total: Total time the GT has been awake.
+		 *
+		 * Accumulated time, not counting the most recent block in
+		 * cases where the GT is currently awake (active is set).
+ */
+ ktime_t total;
+
+ /**
+		 * @start: Timestamp of the last idle-to-active transition.
+		 *
+		 * Idle is defined as !active; the GT is awake while active is set.
+ */
+ ktime_t start;
+ } stats;
+
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7bfe9072be9a..04aa6601e984 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
bdw_setup_private_ppat(uncore);
}
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
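+	/*
+	 * Use an internal object (no shmem backing) and keep it CPU-cached
+	 * so the GPU-written results can be read back efficiently.
+	 */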
+ obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 8a33940a71f3..29c10fde8ce3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 26c7d0a50585..94f485b591af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1,599 +1,23 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <ben@bwidawsk.net>
- * Michel Thierry <michel.thierry@intel.com>
- * Thomas Daniel <thomas.daniel@intel.com>
- * Oscar Mateo <oscar.mateo@intel.com>
- *
- */
-
-/**
- * DOC: Logical Rings, Logical Ring Contexts and Execlists
- *
- * Motivation:
- * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
- * These expanded contexts enable a number of new abilities, especially
- * "Execlists" (also implemented in this file).
- *
- * One of the main differences with the legacy HW contexts is that logical
- * ring contexts incorporate many more things to the context's state, like
- * PDPs or ringbuffer control registers:
- *
- * The reason why PDPs are included in the context is straightforward: as
- * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
- * contained there mean you don't need to do a ppgtt->switch_mm yourself,
- * instead, the GPU will do it for you on the context switch.
- *
- * But, what about the ringbuffer control registers (head, tail, etc..)?
- * shouldn't we just need a set of those per engine command streamer? This is
- * where the name "Logical Rings" starts to make sense: by virtualizing the
- * rings, the engine cs shifts to a new "ring buffer" with every context
- * switch. When you want to submit a workload to the GPU you: A) choose your
- * context, B) find its appropriate virtualized ring, C) write commands to it
- * and then, finally, D) tell the GPU to switch to that context.
- *
- * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
- * to a contexts is via a context execution list, ergo "Execlists".
- *
- * LRC implementation:
- * Regarding the creation of contexts, we have:
- *
- * - One global default context.
- * - One local default context for each opened fd.
- * - One local extra context for each context create ioctl call.
- *
- * Now that ringbuffers belong per-context (and not per-engine, like before)
- * and that contexts are uniquely tied to a given engine (and not reusable,
- * like before) we need:
- *
- * - One ringbuffer per-engine inside each context.
- * - One backing object per-engine inside each context.
- *
- * The global default context starts its life with these new objects fully
- * allocated and populated. The local default context for each opened fd is
- * more complex, because we don't know at creation time which engine is going
- * to use them. To handle this, we have implemented a deferred creation of LR
- * contexts:
- *
- * The local context starts its life as a hollow or blank holder, that only
- * gets populated for a given engine once we receive an execbuffer. If later
- * on we receive another execbuffer ioctl for the same context but a different
- * engine, we allocate/populate a new ringbuffer and context backing object and
- * so on.
- *
- * Finally, regarding local contexts created using the ioctl call: as they are
- * only allowed with the render ring, we can allocate & populate them right
- * away (no need to defer anything, at least for now).
- *
- * Execlists implementation:
- * Execlists are the new method by which, on gen8+ hardware, workloads are
- * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
- * This method works as follows:
- *
- * When a request is committed, its commands (the BB start and any leading or
- * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
- * for the appropriate context. The tail pointer in the hardware context is not
- * updated at this time, but instead, kept by the driver in the ringbuffer
- * structure. A structure representing this request is added to a request queue
- * for the appropriate engine: this structure contains a copy of the context's
- * tail after the request was written to the ring buffer and a pointer to the
- * context itself.
- *
- * If the engine's request queue was empty before the request was added, the
- * queue is processed immediately. Otherwise the queue will be processed during
- * a context switch interrupt. In any case, elements on the queue will get sent
- * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
- * globally unique 20-bits submission ID.
- *
- * When execution of a request completes, the GPU updates the context status
- * buffer with a context complete event and generates a context switch interrupt.
- * During the interrupt handling, the driver examines the events in the buffer:
- * for each context complete event, if the announced ID matches that on the head
- * of the request queue, then that request is retired and removed from the queue.
- *
- * After processing, if any requests were retired and the queue is not empty
- * then a new execution list can be submitted. The two requests at the front of
- * the queue are next to be submitted but since a context may not occur twice in
- * an execution list, if subsequent requests have the same ID as the first then
- * the two requests must be combined. This is done simply by discarding requests
- * at the head of the queue until either only one requests is left (in which case
- * we use a NULL second context) or the first two requests have unique IDs.
- *
- * By always executing the first two requests in the queue the driver ensures
- * that the GPU is kept as busy as possible. In the case where a single context
- * completes but a second context is still executing, the request for this second
- * context will be at the head of the queue when we remove the first one. This
- * request will then be resubmitted along with a new request for a different context,
- * which will cause the hardware to continue executing the second request and queue
- * the new request (the GPU detects the condition of a context getting preempted
- * with the same context and optimizes the context switch flow by not doing
- * preemption, but just sampling the new tail pointer).
- *
*/
-#include <linux/interrupt.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
-#include "i915_trace.h"
-#include "i915_vgpu.h"
-#include "intel_breadcrumbs.h"
-#include "intel_context.h"
-#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
-#include "intel_gt_pm.h"
-#include "intel_gt_requests.h"
+#include "intel_lrc.h"
#include "intel_lrc_reg.h"
-#include "intel_mocs.h"
-#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
-#define RING_EXECLIST_QFULL (1 << 0x2)
-#define RING_EXECLIST1_VALID (1 << 0x3)
-#define RING_EXECLIST0_VALID (1 << 0x4)
-#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
-#define RING_EXECLIST1_ACTIVE (1 << 0x11)
-#define RING_EXECLIST0_ACTIVE (1 << 0x12)
-
-#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
-#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
-#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
-#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
-#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
-#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
-
-#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
-
-#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
-
-#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
-#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
-#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
-#define GEN12_IDLE_CTX_ID 0x7FF
-#define GEN12_CSB_CTX_VALID(csb_dw) \
- (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
-
-/* Typical size of the average request (2 pipecontrols and a MI_BB) */
-#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
-struct virtual_engine {
- struct intel_engine_cs base;
- struct intel_context context;
- struct rcu_work rcu;
-
- /*
- * We allow only a single request through the virtual engine at a time
- * (each request in the timeline waits for the completion fence of
- * the previous before being submitted). By restricting ourselves to
- * only submitting a single request, each request is placed on to a
- * physical engine to maximise load spreading (by virtue of the late greedy
- * scheduling -- each real engine takes the next available request
- * upon idling).
- */
- struct i915_request *request;
-
- /*
- * We keep a rbtree of available virtual engines inside each physical
- * engine, sorted by priority. Here we preallocate the nodes we need
- * for the virtual engine, indexed by physical_engine->id.
- */
- struct ve_node {
- struct rb_node rb;
- int prio;
- } nodes[I915_NUM_ENGINES];
-
- /*
- * Keep track of bonded pairs -- restrictions upon our selection
- * of physical engines any particular request may be submitted to.
- * If we receive a submit-fence from a master engine, we will only
- * use one of sibling_mask physical engines.
- */
- struct ve_bond {
- const struct intel_engine_cs *master;
- intel_engine_mask_t sibling_mask;
- } *bonds;
- unsigned int num_bonds;
-
- /* And finally, which physical engines this virtual engine maps onto. */
- unsigned int num_siblings;
- struct intel_engine_cs *siblings[];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!intel_engine_is_virtual(engine));
- return container_of(engine, struct virtual_engine, base);
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
-
-static void execlists_init_reg_state(u32 *reg_state,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool close);
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head);
-
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
-static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x74;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x68;
- else if (engine->class == RENDER_CLASS)
- return 0xd8;
- else
- return -1;
-}
-
-static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x12;
- else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
- return 0x18;
- else
- return -1;
-}
-
-static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_wa_bb_per_ctx(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_indirect_ptr(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
-{
- if (engine->class != RENDER_CLASS)
- return -1;
-
- if (INTEL_GEN(engine->i915) >= 12)
- return 0xb6;
- else if (INTEL_GEN(engine->i915) >= 11)
- return 0xaa;
- else
- return -1;
-}
-
-static u32
-lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- fallthrough;
- case 12:
- return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 11:
- return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 10:
- return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 9:
- return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 8:
- return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- }
-}
-
-static void
-lrc_ring_setup_indirect_ctx(u32 *regs,
- const struct intel_engine_cs *engine,
- u32 ctx_bb_ggtt_addr,
- u32 size)
-{
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
- GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
- regs[lrc_ring_indirect_ptr(engine) + 1] =
- ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
-
- GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
- regs[lrc_ring_indirect_offset(engine) + 1] =
- lrc_ring_indirect_offset_default(engine) << 6;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
-static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-{
- struct i915_request *active = rq;
-
- rcu_read_lock();
- list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
- if (i915_request_completed(rq))
- break;
-
- active = rq;
- }
- rcu_read_unlock();
-
- return active;
-}
-
-static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
-{
- /*
- * We inspect HWS_PREEMPT with a semaphore inside
- * engine->emit_fini_breadcrumb. If the dword is true,
- * the ring is paused as the semaphore will busywait
- * until the dword is false.
- */
- engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
- if (state)
- wmb();
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return READ_ONCE(rq->sched.attr.priority);
-}
-
-static int effective_prio(const struct i915_request *rq)
-{
- int prio = rq_prio(rq);
-
- /*
- * If this request is special and must not be interrupted at any
- * cost, so be it. Note we are only checking the most recent request
- * in the context and so may be masking an earlier vip request. It
- * is hoped that under the conditions where nopreempt is used, this
- * will not matter (i.e. all requests to that context will be
- * nopreempt for as long as desired).
- */
- if (i915_request_has_nopreempt(rq))
- prio = I915_PRIORITY_UNPREEMPTABLE;
-
- return prio;
-}
-
-static int queue_prio(const struct intel_engine_execlists *execlists)
-{
- struct i915_priolist *p;
- struct rb_node *rb;
-
- rb = rb_first_cached(&execlists->queue);
- if (!rb)
- return INT_MIN;
-
- /*
- * As the priolist[] are inverted, with the highest priority in [0],
- * we have to flip the index value to become priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
-}
-
-static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- struct rb_node *rb)
-{
- int last_prio;
-
- if (!intel_engine_has_semaphores(engine))
- return false;
-
- /*
- * Check if the current priority hint merits a preemption attempt.
- *
- * We record the highest priority value we saw during rescheduling
- * prior to this dequeue, therefore we know that if it is strictly
- * less than the current tail of ELSP[0], we do not need to force
- * a preempt-to-idle cycle.
- *
- * However, the priority hint is a mere hint that we may need to
- * preempt. If that hint is stale or we may be trying to preempt
- * ourselves, ignore the request.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
- if (engine->execlists.queue_priority_hint <= last_prio)
- return false;
-
- /*
- * Check against the first request in ELSP[1], it will, thanks to the
- * power of PI, be the highest priority of that context.
- */
- if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
- rq_prio(list_next_entry(rq, sched.link)) > last_prio)
- return true;
-
- if (rb) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- bool preempt = false;
-
- if (engine == ve->siblings[0]) { /* only preempt one sibling */
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- preempt = rq_prio(next) > last_prio;
- rcu_read_unlock();
- }
-
- if (preempt)
- return preempt;
- }
-
- /*
- * If the inflight context did not trigger the preemption, then maybe
- * it was the set of queued requests? Pick the highest priority in
- * the queue (the first active priolist) and see if it deserves to be
- * running instead of ELSP[0].
- *
- * The highest priority request in the queue cannot be either
- * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
- * context, its priority would not exceed ELSP[0] aka last_prio.
- */
- return queue_prio(&engine->execlists) > last_prio;
-}
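Distilled into a standalone predicate (hypothetical, and assuming the default priority I915_PRIORITY_NORMAL is 0), the hint check above amounts to:

#include <stdbool.h>

/* A strict '>' keeps equal-priority work in FIFO order. */
static bool hint_merits_preemption(int hint, int active_prio)
{
	int last_prio = active_prio > -1 ? active_prio : -1; /* floor: NORMAL - 1 */

	return hint > last_prio;
}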
-
-__maybe_unused static inline bool
-assert_priority_queue(const struct i915_request *prev,
- const struct i915_request *next)
-{
- /*
- * Without preemption, the prev may refer to the still active element
- * which we refuse to let go.
- *
- * Even with preemption, there are times when we think it is better not
- * to preempt and leave an ostensibly lower priority request in flight.
- */
- if (i915_request_is_active(prev))
- return true;
-
- return rq_prio(prev) >= rq_prio(next);
-}
-
-/*
- * The context descriptor encodes various attributes of a context,
- * including its GTT address and some flags. Because it's fairly
- * expensive to calculate, we'll just do it once and cache the result,
- * which remains valid until the context is unpinned.
- *
- * This is what a descriptor looks like, from LSB to MSB::
- *
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
- * bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
- * bits 53-54: mbz, reserved for use by hardware
- * bits 55-63: group ID, currently unused and set to 0
- *
- * Starting from Gen11, the upper dword of the descriptor has a new format:
- *
- * bits 32-36: reserved
- * bits 37-47: SW context ID
- * bits 48-53: engine instance
- * bit 54: mbz, reserved for use by hardware
- * bits 55-60: SW counter
- * bits 61-63: engine class
- *
- * engine info, SW context ID and SW counter need to form a unique number
- * (Context ID) per lrc.
- */
-static u32
-lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
-{
- u32 desc;
-
- desc = INTEL_LEGACY_32B_CONTEXT;
- if (i915_vm_is_4lvl(ce->vm))
- desc = INTEL_LEGACY_64B_CONTEXT;
- desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
- if (IS_GEN(engine->i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- return i915_ggtt_offset(ce->state) | desc;
-}
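A hypothetical standalone packer for the legacy (pre-Gen11) layout documented above, with the field positions taken from the comment rather than from any driver header:

#include <stdint.h>

static uint64_t legacy_lrc_descriptor(uint32_t flags,  /* bits 0-11 */
				      uint32_t lrca,   /* bits 12-31 */
				      uint32_t ctx_id) /* bits 32-52 */
{
	return ((uint64_t)(ctx_id & 0x1fffff) << 32) |	/* 21-bit ctx ID */
	       (lrca & 0xfffff000) |	/* page-aligned GGTT address */
	       (flags & 0xfff);		/* GEN8_CTX_* flags */
}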
-
-static inline unsigned int dword_in_page(void *addr)
-{
- return offset_in_page(addr) / sizeof(u32);
-}
-
static void set_offsets(u32 *regs,
const u8 *data,
const struct intel_engine_cs *engine,
- bool clear)
+ bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
@@ -601,7 +25,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(total_state_size) 0, (total_state_size)
+#define END 0
{
const u32 base = engine->mmio_base;
@@ -610,8 +34,6 @@ static void set_offsets(u32 *regs,
if (*data & BIT(7)) { /* skip */
count = *data++ & ~BIT(7);
- if (clear)
- memset32(regs, MI_NOOP, count);
regs += count;
continue;
}
@@ -639,19 +61,11 @@ static void set_offsets(u32 *regs,
} while (v & BIT(7));
regs[0] = base + (offset << 2);
- if (clear)
- regs[1] = 0;
regs += 2;
} while (--count);
}
- if (clear) {
- u8 count = *++data;
-
- /* Clear past the tail for HW access */
- GEM_BUG_ON(dword_in_page(regs) > count);
- memset32(regs, MI_NOOP, count - dword_in_page(regs));
-
+ if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
if (INTEL_GEN(engine->i915) >= 10)
@@ -691,7 +105,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(80)
+ END
};
static const u8 gen9_xcs_offsets[] = {
@@ -775,7 +189,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(176)
+ END
};
static const u8 gen12_xcs_offsets[] = {
@@ -807,7 +221,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(80)
+ END
};
static const u8 gen8_rcs_offsets[] = {
@@ -844,7 +258,7 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen9_rcs_offsets[] = {
@@ -928,7 +342,7 @@ static const u8 gen9_rcs_offsets[] = {
REG16(0x67c),
REG(0x68),
- END(176)
+ END
};
static const u8 gen11_rcs_offsets[] = {
@@ -969,7 +383,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen12_rcs_offsets[] = {
@@ -1065,7 +479,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END(192)
+ END
};
#undef END
@@ -1104,2284 +518,440 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
}
}
-static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
-{
- struct i915_request *rq, *rn, *active = NULL;
- struct list_head *pl;
- int prio = I915_PRIORITY_INVALID;
-
- lockdep_assert_held(&engine->active.lock);
-
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->active.requests,
- sched.link) {
- if (i915_request_completed(rq))
- continue; /* XXX */
-
- __i915_request_unsubmit(rq);
-
- /*
- * Push the request back into the queue for later resubmission.
- * If this request is not native to this physical engine (i.e.
- * it came from a virtual source), push it back onto the virtual
- * engine so that it can be moved across onto another physical
- * engine as load dictates.
- */
- if (likely(rq->execution_mask == engine->mask)) {
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
- list_move(&rq->sched.link, pl);
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Check in case we roll back so far that we wrap [size/2] */
- if (intel_ring_direction(rq->ring,
- rq->tail,
- rq->ring->tail + 8) > 0)
- rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-
- active = rq;
- } else {
- struct intel_engine_cs *owner = rq->context->engine;
-
- WRITE_ONCE(rq->engine, owner);
- owner->submit_request(rq);
- active = NULL;
- }
- }
-
- return active;
-}
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
-static inline void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * The compiler should eliminate this function as dead-code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->engine->context_status_notifier,
- status, rq);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
}
-static void intel_engine_context_in(struct intel_engine_cs *engine)
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- if (atomic_add_unless(&engine->stats.active, 1, 0))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
- engine->stats.start = ktime_get();
- atomic_inc(&engine->stats.active);
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
}
-static void intel_engine_context_out(struct intel_engine_cs *engine)
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
- if (atomic_add_unless(&engine->stats.active, -1, 1))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (atomic_dec_and_test(&engine->stats.active)) {
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
}
-static void
-execlists_check_context(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const char *when)
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
- const struct intel_ring *ring = ce->ring;
- u32 *regs = ce->lrc_reg_state;
- bool valid = true;
int x;
- if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
- pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_START],
- i915_ggtt_offset(ring->vma));
- regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- valid = false;
- }
-
- if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
- (RING_CTL_SIZE(ring->size) | RING_VALID)) {
- pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_CTL],
- (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- valid = false;
- }
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
- pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
- engine->name, regs[x + 1]);
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- valid = false;
- }
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
- WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
+ return x + 2;
}
-static void restore_default_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
- u32 *regs;
+ int x;
- regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
- execlists_init_reg_state(regs, ce, engine, ce->ring, true);
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
- ce->runtime.last = intel_context_get_runtime(ce);
+ return x + 2;
}
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
- u32 head;
-
- /*
- * The executing context has been cancelled. We want to prevent
- * further execution along this context and propagate the error on
- * to anything depending on its results.
- *
- * In __i915_request_submit(), we apply the -EIO and remove the
- * requests' payloads for any banned requests. But first, we must
- * rewind the context back to the start of the incomplete request so
- * that we do not jump back into the middle of the batch.
- *
- * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e other timelines)
- * remain correctly ordered. And we defer to __i915_request_submit()
- * so that all asynchronous waits are correctly handled.
- */
- ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
- rq->fence.context, rq->fence.seqno);
+ if (engine->class != RENDER_CLASS)
+ return -1;
- /* On resubmission of the active request, payload will be scrubbed */
- if (i915_request_completed(rq))
- head = rq->tail;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
else
- head = active_request(ce->timeline, rq)->head;
- head = intel_ring_wrap(ce->ring, head);
-
- /* Scrub the context image to prevent replaying the previous batch */
- restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
-
- /* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow += dt < 0;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
-#endif
+ return -1;
}
-static void intel_context_update_runtime(struct intel_context *ce)
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
- u32 old;
- s32 dt;
-
- if (intel_context_is_barrier(ce))
- return;
-
- old = ce->runtime.last;
- ce->runtime.last = intel_context_get_runtime(ce);
- dt = ce->runtime.last - old;
-
- if (unlikely(dt <= 0)) {
- CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
- return;
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
}
-
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
}
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
{
- struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->context;
-
- intel_context_get(ce);
-
- if (unlikely(intel_context_is_banned(ce)))
- reset_active(rq, engine);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "before");
-
- if (ce->tag) {
- /* Use a fixed tag for OA and friends */
- GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
- ce->lrc.ccid = ce->tag;
- } else {
- /* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(READ_ONCE(engine->context_tag));
-
- GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
- clear_bit(tag - 1, &engine->context_tag);
- ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
- BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
- }
-
- ce->lrc.ccid |= engine->execlists.ccid;
-
- __intel_gt_pm_get(engine->gt);
- if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
- intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(engine);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
- return engine;
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
}
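The ffs()-based ccid tag recycling above follows a clear-on-acquire/set-on-release pattern; a minimal userspace sketch of the same idea (hypothetical names, limited to 32 tags):

#include <strings.h>	/* ffs() */

/* Acquire a tag: the 1-based index of a free bit, 0 if none are left. */
static int tag_get(unsigned int *bitmap)
{
	int tag = ffs((int)*bitmap);

	if (tag)
		*bitmap &= ~(1U << (tag - 1));	/* mark busy */
	return tag;
}

/* Release a tag previously returned by tag_get(). */
static void tag_put(unsigned int *bitmap, int tag)
{
	*bitmap |= 1U << (tag - 1);		/* mark free */
}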
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
+static void init_common_regs(u32 * const regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *old;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
- trace_i915_request_in(rq, idx);
-
- old = READ_ONCE(ce->inflight);
- do {
- if (!old) {
- WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
- break;
- }
- } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
- GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
- return i915_request_get(rq);
-}
+ u32 ctl;
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- struct i915_request *next = READ_ONCE(ve->request);
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (INTEL_GEN(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
- if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ regs[CTX_TIMESTAMP] = ce->runtime.last;
}
-static inline void
-__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine,
- unsigned int ccid)
+static void init_wa_bb_regs(u32 * const regs,
+ const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
-
- /*
- * NB process_csb() is not under the engine->active.lock and hence
- * schedule_out can race with schedule_in meaning that we should
- * refrain from doing non-trivial work here.
- */
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "after");
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- /*
- * If we have just completed this context, the engine may now be
- * idle and we want to re-enter powersaving.
- */
- if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
- i915_request_completed(rq))
- intel_engine_add_retire(engine, ce->timeline);
-
- ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
- ccid &= GEN12_MAX_CONTEXT_HW_ID;
- if (ccid < BITS_PER_LONG) {
- GEM_BUG_ON(ccid == 0);
- GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
- set_bit(ccid - 1, &engine->context_tag);
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- intel_context_update_runtime(ce);
- intel_engine_context_out(engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
- if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
- intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
-
- /*
- * If this is part of a virtual engine, its next request may
- * have been blocked waiting for access to the active context.
- * We have to kick all the siblings again in case we need to
- * switch (e.g. the next request is not runnable on this
- * engine). Hopefully, we will already have submitted the next
- * request before the tasklet runs and do not need to rebuild
- * each virtual tree and kick everyone again.
- */
- if (ce->engine != engine)
- kick_siblings(rq, ce);
-
- intel_context_put(ce);
-}
-
-static inline void
-execlists_schedule_out(struct i915_request *rq)
-{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *cur, *old;
- u32 ccid;
-
- trace_i915_request_out(rq);
-
- ccid = rq->context->lrc.ccid;
- old = READ_ONCE(ce->inflight);
- do
- cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
- while (!try_cmpxchg(&ce->inflight, &old, cur));
- if (!cur)
- __execlists_schedule_out(rq, old, ccid);
-
- i915_request_put(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
-{
- struct intel_context *ce = rq->context;
- u64 desc = ce->lrc.desc;
- u32 tail, prev;
-
- /*
- * WaIdleLiteRestore:bdw,skl
- *
- * We should never submit the context with the same RING_TAIL twice
- * just in case we submit an empty ring, which confuses the HW.
- *
- * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
- * the normal request to be able to always advance the RING_TAIL on
- * subsequent resubmissions (for lite restore). Should that fail us,
- * and we try and submit the same tail again, force the context
- * reload.
- *
- * If we need to return to a preempted context, we need to skip the
- * lite-restore and force it to reload the RING_TAIL. Otherwise, the
- * HW has a tendency to ignore us rewinding the TAIL to the end of
- * an earlier request.
- */
- GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
- prev = rq->ring->tail;
- tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
- desc |= CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state[CTX_RING_TAIL] = tail;
- rq->tail = rq->wa_tail;
-
- /*
- * Make sure the context image is complete before we submit it to HW.
- *
- * Ostensibly, writes (including the WCB) should be flushed prior to
- * an uncached write such as our mmio register access, the empirical
- * evidence (esp. on Braswell) suggests that the WC write into memory
- * may not be visible to the HW prior to the completion of the UC
- * register write and that we may begin execution from the context
- * before its image is complete leading to invalid PD chasing.
- */
- wmb();
-
- ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
- return desc;
-}
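The tail-advance rule in the WaIdleLiteRestore comment reduces to roughly the heuristic below; this is a hedged stand-in for intel_ring_direction(), assuming a power-of-two ring size:

#include <stdbool.h>
#include <stdint.h>

static bool needs_force_restore(uint32_t prev_tail, uint32_t new_tail,
				uint32_t ring_size)
{
	/* forward distance from the old tail, modulo the ring */
	uint32_t fwd = (new_tail - prev_tail) & (ring_size - 1);

	/* Same tail, or "forward" only by wrapping past half the ring
	 * (effectively a rewind): skip the lite-restore.
	 */
	return fwd == 0 || fwd > ring_size / 2;
}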
-
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
-{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
- } else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ if (wa_ctx->indirect_ctx.size) {
+ lrc_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
-static __maybe_unused char *
-dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
- if (!rq)
- return "";
-
- snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
- prefix,
- rq->context->lrc.ccid,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- rq_prio(rq));
-
- return buf;
-}
-
-static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
- const char *msg,
- struct i915_request * const *ports)
-{
- const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- char __maybe_unused p0[40], p1[40];
-
- if (!ports[0])
- return;
-
- ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
- dump_port(p0, sizeof(p0), "", ports[0]),
- dump_port(p1, sizeof(p1), ", ", ports[1]));
-}
-
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
-static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
- const char *msg)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
- struct intel_context *ce = NULL;
- bool sentinel = false;
- u32 ccid = -1;
-
- trace_ports(execlists, msg, execlists->pending);
-
- /* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
- return true;
-
- if (!execlists->pending[0]) {
- GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
- engine->name);
- return false;
- }
-
- if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
- return false;
- }
-
- for (port = execlists->pending; (rq = *port); port++) {
- unsigned long flags;
- bool ok = true;
-
- GEM_BUG_ON(!kref_read(&rq->fence.refcount));
- GEM_BUG_ON(!i915_request_is_active(rq));
-
- if (ce == rq->context) {
- GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ce = rq->context;
-
- if (ccid == ce->lrc.ccid) {
- GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
- engine->name,
- ccid, ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ccid = ce->lrc.ccid;
-
- /*
- * Sentinels are supposed to be the last request so they flush
- * the current execution off the HW. Check that they are the only
- * request in the pending submission.
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
*/
- if (sentinel) {
- GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- sentinel = i915_request_has_sentinel(rq);
-
- /* Hold tightly onto the lock to prevent concurrent retires! */
- if (!spin_trylock_irqsave(&rq->lock, flags))
- continue;
-
- if (i915_request_completed(rq))
- goto unlock;
-
- if (i915_active_is_idle(&ce->active) &&
- !intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!ok)
- return false;
- }
-
- return ce;
-}
-
-static void execlists_submit_ports(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned int n;
-
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
-
- /*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
- */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /*
- * ELSQ note: the submit queue is not cleared after being submitted
- * to the HW so we need to make sure we always clean it up. This is
- * currently ensured by the fact that we always write the same number
- * of elsq entries, keep this in mind before changing the loop below.
- */
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
-
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
- }
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-}
-
-static bool ctx_single_port_submission(const struct intel_context *ce)
-{
- return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- intel_context_force_single_submission(ce));
-}
-
-static bool can_merge_ctx(const struct intel_context *prev,
- const struct intel_context *next)
-{
- if (prev != next)
- return false;
-
- if (ctx_single_port_submission(prev))
- return false;
-
- return true;
-}
-
-static unsigned long i915_request_flags(const struct i915_request *rq)
-{
- return READ_ONCE(rq->fence.flags);
-}
-
-static bool can_merge_rq(const struct i915_request *prev,
- const struct i915_request *next)
-{
- GEM_BUG_ON(prev == next);
- GEM_BUG_ON(!assert_priority_queue(prev, next));
-
- /*
- * We do not submit known completed requests. Therefore if the next
- * request is already completed, we can pretend to merge it in
- * with the previous context (and we will skip updating the ELSP
- * and tracking). Thus hopefully keeping the ELSP full with active
- * contexts, despite the best efforts of preempt-to-busy to confuse
- * us.
- */
- if (i915_request_completed(next))
- return true;
-
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
- (BIT(I915_FENCE_FLAG_NOPREEMPT) |
- BIT(I915_FENCE_FLAG_SENTINEL))))
- return false;
-
- if (!can_merge_ctx(prev->context, next->context))
- return false;
-
- GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
- return true;
-}
-
-static void virtual_update_register_offsets(u32 *regs,
- struct intel_engine_cs *engine)
-{
- set_offsets(regs, reg_offsets(engine), engine, false);
-}
-
-static bool virtual_matches(const struct virtual_engine *ve,
- const struct i915_request *rq,
- const struct intel_engine_cs *engine)
-{
- const struct intel_engine_cs *inflight;
-
- if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
- return false;
-
- /*
- * We track when the HW has completed saving the context image
- * (i.e. when we have seen the final CS event switching out of
- * the context) and must not overwrite the context image before
- * then. This restricts us to only using the active engine
- * while the previous virtualized request is inflight (so
- * we reuse the register offsets). This is a very small
- * hysteresis on the greedy selection algorithm.
- */
- inflight = intel_context_inflight(&ve->context);
- if (inflight && inflight != engine)
- return false;
-
- return true;
-}
-
-static void virtual_xfer_context(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
-{
- unsigned int n;
-
- if (likely(engine == ve->siblings[0]))
- return;
-
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- engine);
-
- /*
- * Move the bound engine to the top of the list for
- * future execution. We then kick this tasklet first
- * before checking others, so that we preferentially
- * reuse this set of bound registers.
- */
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n], ve->siblings[0]);
- break;
- }
+ ASSIGN_CTX_PML4(ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
}
-#define for_each_waiter(p__, rq__) \
- list_for_each_entry_lockless(p__, \
- &(rq__)->sched.waiters_list, \
- wait_link)
-
-#define for_each_signaler(p__, rq__) \
- list_for_each_entry_rcu(p__, \
- &(rq__)->sched.signalers_list, \
- signal_link)
-
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
- LIST_HEAD(list);
-
- /*
- * We want to move the interrupted request to the back of
- * the round-robin list (i.e. its priority level), but
- * in doing so, we must then move all requests that were in
- * flight and were waiting for the interrupted request to
- * be run after it again.
- */
- do {
- struct i915_dependency *p;
-
- GEM_BUG_ON(i915_request_is_active(rq));
- list_move_tail(&rq->sched.link, pl);
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- if (p->flags & I915_DEPENDENCY_WEAK)
- continue;
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- /* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
- i915_request_started(w) &&
- !i915_request_completed(rq));
-
- GEM_BUG_ON(i915_request_is_active(w));
- if (!i915_request_is_ready(w))
- continue;
-
- if (rq_prio(w) < rq_prio(rq))
- continue;
-
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void defer_active(struct intel_engine_cs *engine)
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
- struct i915_request *rq;
-
- rq = __unwind_incomplete_requests(engine);
- if (!rq)
- return;
-
- defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
}
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- const struct rb_node *rb)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- int hint;
-
- if (!intel_engine_has_timeslices(engine))
- return false;
-
- hint = engine->execlists.queue_priority_hint;
-
- if (rb) {
- const struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- const struct intel_engine_cs *inflight =
- intel_context_inflight(&ve->context);
-
- if (!inflight || inflight == engine) {
- struct i915_request *next;
+ int x;
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- hint = max(hint, rq_prio(next));
- rcu_read_unlock();
- }
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- if (!list_is_last(&rq->sched.link, &engine->active.requests))
- hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
- GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
- return hint >= effective_prio(rq);
}
-static bool
-timeslice_yield(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
+static void __lrc_init_regs(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
/*
- * Once bitten, forever smitten!
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
*
- * If the active context ever busy-waited on a semaphore,
- * it will be treated as a hog until the end of its timeslice (i.e.
- * until it is scheduled out and replaced by a new submission,
- * possibly even its own lite-restore). The HW only sends an interrupt
- * on the first miss, and we do not know if that semaphore has been
- * signaled, or even if it is now stuck on another semaphore. Play
- * safe, yield if it might be stuck -- it will be given a fresh
- * timeslice in the near future.
+ * Must keep consistent with virtual_update_register_offsets().
*/
- return rq->context->lrc.ccid == READ_ONCE(el->yield);
-}
-
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
-{
- return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return engine->execlists.queue_priority_hint;
-
- return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
- return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- const struct i915_request *rq = *execlists->active;
-
- if (!rq || i915_request_completed(rq))
- return 0;
-
- if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
- return 0;
-
- return timeslice(engine);
-}
+ if (inhibit)
+ memset(regs, 0, PAGE_SIZE);
-static void set_timeslice(struct intel_engine_cs *engine)
-{
- unsigned long duration;
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (!intel_engine_has_timeslices(engine))
- return;
+ init_common_regs(regs, ce, engine, inhibit);
+ init_ppgtt_regs(regs, vm_alias(ce->vm));
- duration = active_timeslice(engine);
- ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+ init_wa_bb_regs(regs, engine);
- set_timer_ms(&engine->execlists.timer, duration);
+ __reset_stop_ring(regs, engine);
}
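To make the "big batch buffer" description above concrete, a hypothetical fragment of an initialised state area could decode as follows (opcodes and offsets are illustrative only; the authoritative tables are the gen*_offsets[] arrays above):

#include <stdint.h>

static const uint32_t example_lrc_state[] = {
	0x11000005,	/* MI_LOAD_REGISTER_IMM(3) */
	0x2244, 0x0,	/* (reg, value): CTX_CONTEXT_CONTROL */
	0x2034, 0x0,	/* (reg, value): RING_HEAD */
	0x2030, 0x0,	/* (reg, value): RING_TAIL */
	0x05000000,	/* MI_BATCH_BUFFER_END closes the "batch" */
};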
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long duration;
-
- if (!intel_engine_has_timeslices(engine))
- return;
-
- WRITE_ONCE(execlists->switch_priority_hint, prio);
- if (prio == INT_MIN)
- return;
-
- if (timer_pending(&execlists->timer))
- return;
-
- duration = timeslice(engine);
- ENGINE_TRACE(engine,
- "start timeslicing, prio:%d, interval:%lu",
- prio, duration);
-
- set_timer_ms(&execlists->timer, duration);
+ __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ __reset_stop_ring(ce->lrc_reg_state, engine);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
- if (!rq)
- return 0;
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
- /* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
- return 1;
+ vaddr += engine->context_size;
- return READ_ONCE(engine->props.preempt_timeout_ms);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
- if (!intel_engine_has_preempt_reset(engine))
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine, rq));
-}
-
-static inline void clear_ports(struct i915_request **ports, int count)
-{
- memset_p((void **)ports, NULL, count);
-}
+ vaddr += engine->context_size;
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
- /* A memcpy_p() would be very useful here! */
- while (count--)
- WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ drm_err_once(&engine->i915->drm,
+ "%s context redzone overwritten!\n",
+ engine->name);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
- struct i915_request * const *active;
- struct i915_request *last;
- struct rb_node *rb;
- bool submit = false;
-
- /*
- * Hardware submission is through 2 ports. Conceptually each port
- * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
- * static for a context, and unique to each, so we only execute
- * requests belonging to a single context from each ring. RING_HEAD
- * is maintained by the CS in the context image; it marks the place
- * where it got up to last time, and through RING_TAIL we tell the CS
- * where we want to execute up to this time.
- *
- * In this list the requests are in order of execution. Consecutive
- * requests from the same context are adjacent in the ringbuffer. We
- * can combine these requests into a single RING_TAIL update:
- *
- * RING_HEAD...req1...req2
- * ^- RING_TAIL
- * since to execute req2 the CS must first execute req1.
- *
- * Our goal then is to point each port to the end of a consecutive
- * sequence of requests as being the most optimal (fewest wake ups
- * and context switches) submission.
- */
-
- for (rb = rb_first_cached(&execlists->virtual); rb; ) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (!rq) { /* lazily cleanup after another engine handled rq */
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
- if (!virtual_matches(ve, rq, engine)) {
- rb = rb_next(rb);
- continue;
- }
-
- break;
- }
-
- /*
- * If the queue is higher priority than the last
- * request in the currently active context, submit afresh.
- * We will resubmit again afterwards in case we need to split
- * the active context to interject the preemption request,
- * i.e. we will retrigger preemption following the ack in case
- * of trouble.
- */
- active = READ_ONCE(execlists->active);
-
- /*
- * In theory we can skip over completed contexts that have not
- * yet been processed by events (as those events are in flight):
- *
- * while ((last = *active) && i915_request_completed(last))
- * active++;
- *
- * However, the GPU cannot handle this as it will ultimately
- * find itself trying to jump back into a context it has just
- * completed and barf.
- */
-
- if ((last = *active)) {
- if (need_preempt(engine, last, rb)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "preempting last=%llx:%lld, prio=%d, hint=%d\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
- record_preemption(execlists);
-
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
- /*
- * Note that we have not stopped the GPU at this point,
- * so we are unwinding the incomplete requests as they
- * remain inflight and so by the time we do complete
- * the preemption, some of the unwound requests may
- * complete!
- */
- __unwind_incomplete_requests(engine);
-
- last = NULL;
- } else if (need_timeslice(engine, last, rb) &&
- timeslice_expired(execlists, last)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint,
- yesno(timeslice_yield(execlists, last)));
-
- ring_set_paused(engine, 1);
- defer_active(engine);
-
- /*
- * Unlike for preemption, if we rewind and continue
- * executing the same context as previously active,
- * the order of execution will remain the same and
- * the tail will only advance. We do not need to
- * force a full context restore, as a lite-restore
- * is sufficient to resample the monotonic TAIL.
- *
- * If we switch to any other context, similarly we
- * will not rewind TAIL of current context, and
- * normal save/restore will preserve state and allow
- * us to later continue executing the same request.
- */
- last = NULL;
- } else {
- /*
- * Otherwise if we already have a request pending
- * for execution after the current one, we can
- * just wait until the next CS event before
- * queuing more. In either case we will force a
- * lite-restore preemption event, but if we wait
- * we hopefully coalesce several updates into a single
- * submission.
- */
- if (!list_is_last(&last->sched.link,
- &engine->active.requests)) {
- /*
- * Even if ELSP[1] is occupied and not worthy
- * of timeslices, our queue might be.
- */
- start_timeslice(engine, queue_prio(execlists));
- return;
- }
- }
- }
-
- while (rb) { /* XXX virtual is always taking precedence */
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq;
-
- spin_lock(&ve->base.active.lock);
-
- rq = ve->request;
- if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.active.lock);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
+ bool inhibit = true;
- GEM_BUG_ON(rq != ve->request);
- GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->context != &ve->context);
-
- if (rq_prio(rq) >= queue_prio(execlists)) {
- if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_next(rb);
- continue;
- }
-
- if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
- start_timeslice(engine, rq_prio(rq));
- return; /* leave this for another sibling */
- }
-
- ENGINE_TRACE(engine,
- "virtual rq=%llx:%lld%s, new engine? %s\n",
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
-
- WRITE_ONCE(ve->request, NULL);
- WRITE_ONCE(ve->base.execlists.queue_priority_hint,
- INT_MIN);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- WRITE_ONCE(rq->engine, engine);
-
- if (__i915_request_submit(rq)) {
- /*
- * Only after we confirm that we will submit
- * this request (i.e. it has not already
- * completed), do we want to update the context.
- *
- * This serves two purposes. It avoids
- * unnecessary work if we are resubmitting an
- * already completed request after timeslicing.
- * But more importantly, it prevents us altering
- * ve->siblings[] on an idle context, where
- * we may be using ve->siblings[] in
- * virtual_context_enter / virtual_context_exit.
- */
- virtual_xfer_context(ve, engine);
- GEM_BUG_ON(ve->siblings[0] != engine);
-
- submit = true;
- last = rq;
- }
- i915_request_put(rq);
-
- /*
- * Hmm, we have a bunch of virtual engine requests,
- * but the first one was already completed (thanks
- * preempt-to-busy!). Keep looking at the veng queue
- * until we have no more relevant requests (i.e.
- * the normal submit queue has higher priority).
- */
- if (!submit) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
- }
+ set_redzone(state, engine);
- spin_unlock(&ve->base.active.lock);
- break;
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ state, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
}
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- struct i915_request *rq, *rn;
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- bool merge = true;
-
- /*
- * Can we combine this request with the current port?
- * It has to be the same context/ringbuffer and not
- * have any exceptions (e.g. GVT saying never to
- * combine contexts).
- *
- * If we can combine the requests, we can execute both
- * by updating the RING_TAIL to point to the end of the
- * second request, and so we never need to tell the
- * hardware about the first.
- */
- if (last && !can_merge_rq(last, rq)) {
- /*
- * If we are on the second port and cannot
- * combine this request with the last, then we
- * are done.
- */
- if (port == last_port)
- goto done;
-
- /*
- * We must not populate both ELSP[] with the
- * same LRCA, i.e. we must submit 2 different
- * contexts if we submit 2 ELSP.
- */
- if (last->context == rq->context)
- goto done;
-
- if (i915_request_has_sentinel(last))
- goto done;
-
- /*
- * If GVT overrides us we only ever submit
- * port[0], leaving port[1] empty. Note that we
- * also have to be careful that we don't queue
- * the same context (even though a different
- * request) to the second port.
- */
- if (ctx_single_port_submission(last->context) ||
- ctx_single_port_submission(rq->context))
- goto done;
-
- merge = false;
- }
-
- if (__i915_request_submit(rq)) {
- if (!merge) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- port++;
- last = NULL;
- }
-
- GEM_BUG_ON(last &&
- !can_merge_ctx(last->context,
- rq->context));
- GEM_BUG_ON(last &&
- i915_seqno_passed(last->fence.seqno,
- rq->fence.seqno));
-
- submit = true;
- last = rq;
- }
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(state, 0, PAGE_SIZE);
-done:
/*
- * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
- *
- * We choose the priority hint such that if we add a request of greater
- * priority than this, we kick the submission tasklet to decide on
- * the right order of submitting the requests to hardware. We must
- * also be prepared to reorder requests as they are in-flight on the
- * HW. We derive the priority hint then as the first "hole" in
- * the HW submission ports and if there are no available slots,
- * the priority of the lowest executing request, i.e. last.
- *
- * When we do receive a higher priority request ready to run from the
- * user, see queue_request(), the priority hint is bumped to that
- * request triggering preemption on the next dequeue (or subsequent
- * interrupt for secondary ports).
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
*/
- execlists->queue_priority_hint = queue_prio(execlists);
-
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
-
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
-
- WRITE_ONCE(execlists->yield, -1);
- set_preempt_timeout(engine, *active);
- execlists_submit_ports(engine);
- } else {
- start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
- ring_set_paused(engine, 0);
- }
+ __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
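
As an aside on the priority-hint scheme spelled out in the removed dequeue comment above, the rule reduces to a small stand-alone model; the types below are invented for illustration and are not driver code:

#include <limits.h>

struct port_model { int busy; int prio; };

/*
 * Sketch of the hint rule: with a free submission port, any new
 * request should kick the tasklet (hint = INT_MIN); with all ports
 * busy, only a request beating the last (lowest-priority) executing
 * request should trigger preemption on the next dequeue.
 */
static int queue_priority_hint(const struct port_model *ports, int nports)
{
	int i;

	for (i = 0; i < nports; i++)
		if (!ports[i].busy)
			return INT_MIN;

	return ports[nports - 1].prio;
}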
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_request * const *port;
-
- for (port = execlists->pending; *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
-
- /* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
-
- smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
-}
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 context_size;
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-/*
- * Starting with Gen12, the status has a new format:
- *
- * bit 0: switched to new queue
- * bit 1: reserved
- * bit 2: semaphore wait mode (poll or signal), only valid when
- * switch detail is set to "wait on semaphore"
- * bits 3-5: engine class
- * bits 6-11: engine instance
- * bits 12-14: reserved
- * bits 15-25: sw context id of the lrc the GT switched to
- * bits 26-31: sw counter of the lrc the GT switched to
- * bits 32-35: context switch detail
- * - 0: ctx complete
- * - 1: wait on sync flip
- * - 2: wait on vblank
- * - 3: wait on scanline
- * - 4: wait on semaphore
- * - 5: context preempted (not on SEMAPHORE_WAIT or
- * WAIT_FOR_EVENT)
- * bit 36: reserved
- * bits 37-43: wait detail (for switch detail 1 to 4)
- * bits 44-46: reserved
- * bits 47-57: sw context id of the lrc the GT switched away from
- * bits 58-63: sw counter of the lrc the GT switched away from
- */
-static inline bool gen12_csb_parse(const u64 csb)
-{
- bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
- bool new_queue =
- lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
- /*
- * The context switch detail is not guaranteed to be 5 when a preemption
- * occurs, so we can't just check for that. The check below works for
- * all the cases we care about, including preemptions of WAIT
- * instructions and lite-restore. Preempt-to-idle via the CTRL register
- * would require some extra handling, but we don't support that.
- */
- if (!ctx_away_valid || new_queue) {
- GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
- return true;
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
}
- /*
- * switch detail = 5 is covered by the case above and we do not expect a
- * context switch on an unsuccessful wait instruction since we always
- * use polling mode.
- */
- GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
- return false;
-}
-
-static inline bool gen8_csb_parse(const u64 csb)
-{
- return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
-}
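
For reference, a stand-alone decoder for the Gen12 CSB layout documented above could look like the sketch below; the field helpers are invented for illustration and are not the driver's macros:

#include <stdbool.h>
#include <stdint.h>

/* Field positions follow the Gen12 layout comment above. */
#define CSB_TO_SW_CTX_ID(csb)	(((csb) >> 15) & 0x7ff)	/* bits 15-25 */
#define CSB_TO_SW_COUNTER(csb)	(((csb) >> 26) & 0x3f)	/* bits 26-31 */
#define CSB_SWITCH_DETAIL(csb)	(((csb) >> 32) & 0xf)	/* bits 32-35 */
#define CSB_AWAY_SW_CTX_ID(csb)	(((csb) >> 47) & 0x7ff)	/* bits 47-57 */

static bool csb_switched_to_new_queue(uint64_t csb)
{
	return csb & 1;	/* bit 0 */
}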
-
-static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry;
-
- /*
- * Reading from the HWSP has one particular advantage: we can detect
- * a stale entry. Since the write into HWSP is broken, we have no reason
- * to trust the HW at all; the mmio entry may equally be unordered, so
- * we prefer the path that is self-checking and as a last resort,
- * return the mmio value.
- *
- * tgl,dg1:HSDES#22011327657
- */
- preempt_disable();
- if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
- int idx = csb - engine->execlists.csb_status;
- int status;
-
- status = GEN8_EXECLISTS_STATUS_BUF;
- if (idx >= 6) {
- status = GEN11_EXECLISTS_STATUS_BUF2;
- idx -= 6;
- }
- status += sizeof(u64) * idx;
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- entry = intel_uncore_read64(engine->uncore,
- _MMIO(engine->mmio_base + status));
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
}
- preempt_enable();
-
- return entry;
-}
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry = READ_ONCE(*csb);
-
- /*
- * Unfortunately, the GPU does not always serialise its write
- * of the CSB entries before its write of the CSB pointer, at least
- * from the perspective of the CPU, using what is known as a Global
- * Observation Point. We may read a new CSB tail pointer, but then
- * read the stale CSB entries, causing us to misinterpret the
- * context-switch events, and eventually declare the GPU hung.
- *
- * icl:HSDES#1806554093
- * tgl:HSDES#22011248461
- */
- if (unlikely(entry == -1))
- entry = wa_csb_read(engine, csb);
-
- /* Consume this entry so that we can spot its future reuse. */
- WRITE_ONCE(*csb, -1);
-
- /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
- return entry;
+ return vma;
}
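
The -1 sentinel used by csb_read() above generalises to a simple consume-and-mark pattern; a minimal stand-alone model (not driver code):

#include <stdint.h>

/*
 * Each consumed slot is overwritten with -1, so reading -1 later
 * means the producer has not yet published a fresh value there and
 * the caller must fall back (wa_csb_read() in the driver).
 */
static uint64_t take_entry(uint64_t *slot)
{
	uint64_t v = *slot;

	*slot = (uint64_t)-1;
	return v;
}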
-static void process_csb(struct intel_engine_cs *engine)
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
- u8 head, tail;
-
- /*
- * As we modify our execlists state tracking we require exclusive
- * access. Either we are inside the tasklet, or the tasklet is disabled
- * and we assume that is only inside the reset paths and so serialised.
- */
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
-
- /*
- * Note that csb_write, csb_status may be either in HWSP or mmio.
- * When reading from the csb_write mmio register, we have to be
- * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
- * the low 4bits. As it happens we know the next 4bits are always
- * zero and so we can simply mask off the low u8 of the register
- * and treat it identically to reading from the HWSP (without having
- * to use explicit shifting and masking, and probably bifurcating
- * the code to handle the legacy mmio read).
- */
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
- if (unlikely(head == tail))
- return;
-
- /*
- * We will consume all events from HW, or at least pretend to.
- *
- * The sequence of events from the HW is deterministic, and derived
- * from our writes to the ELSP, with a smidgen of variability for
- * the arrival of the asynchronous requests wrt the inflight
- * execution. If the HW sends an event that does not correspond with
- * the one we are expecting, we have to abandon all hope as we lose
- * all tracking of what the engine is actually executing. We will
- * only detect we are out of sequence with the HW when we get an
- * 'impossible' event because we have already drained our own
- * preemption/promotion queue. If this occurs, we know that we likely
- * lost track of execution earlier and must unwind and restart; the
- * simplest way is to stop processing the event queue and force the
- * engine to reset.
- */
- execlists->csb_head = tail;
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
-
- /*
- * Hopefully paired with a wmb() in HW!
- *
- * We must complete the read of the write pointer before any reads
- * from the CSB, so that we do not see stale values. Without an rmb
- * (lfence) the HW may speculatively perform the CSB[] reads *before*
- * we perform the READ_ONCE(*csb_write).
- */
- rmb();
- do {
- bool promote;
- u64 csb;
-
- if (++head == num_entries)
- head = 0;
-
- /*
- * We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
- */
-
- csb = csb_read(engine, buf + head);
- ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
- head, upper_32_bits(csb), lower_32_bits(csb));
-
- if (INTEL_GEN(engine->i915) >= 12)
- promote = gen12_csb_parse(csb);
- else
- promote = gen8_csb_parse(csb);
- if (promote) {
- struct i915_request * const *old = execlists->active;
-
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- ring_set_paused(engine, 0);
-
- /* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
- smp_wmb(); /* notify execlists_active() */
-
- /* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
- while (*old)
- execlists_schedule_out(*old++);
-
- /* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
- smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
-
- /* XXX Magic delay for tgl */
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- WRITE_ONCE(execlists->pending[0], NULL);
- } else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- /* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt is processed. One might assume
- * that, since the breadcrumb write precedes the
- * user interrupt and the CS event for the context
- * switch, the breadcrumb would therefore be visible
- * before the CS event itself...
- */
- if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
- const u32 *regs __maybe_unused =
- rq->context->lrc_reg_state;
-
- ENGINE_TRACE(engine,
- "context completed before request!\n");
- ENGINE_TRACE(engine,
- "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
- ENGINE_READ(engine, RING_START),
- ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
- ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_MI_MODE));
- ENGINE_TRACE(engine,
- "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
- i915_ggtt_offset(rq->ring->vma),
- rq->head, rq->tail,
- rq->fence.context,
- lower_32_bits(rq->fence.seqno),
- hwsp_seqno(rq));
- ENGINE_TRACE(engine,
- "ctx:{start:%08x, head:%04x, tail:%04x}, ",
- regs[CTX_RING_START],
- regs[CTX_RING_HEAD],
- regs[CTX_RING_TAIL]);
- }
-
- execlists_schedule_out(*execlists->active++);
-
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
- }
- } while (head != tail);
-
- set_timeslice(engine);
-
- /*
- * Gen11 has proven to fail wrt global observation point between
- * entry and tail update, failing on the ordering and thus
- * we see an old entry in the context status buffer.
- *
- * Forcibly evict out entries for the next gpu csb update,
- * to increase the odds that we get fresh entries from
- * non-working hardware. The cost of doing so comes out
- * mostly in the wash, as hardware, working or not, will
- * need to do the invalidation beforehand.
- */
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
-}
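
Abstracted away from the hardware, the consumer ordering process_csb() depends on is the classic acquire-on-tail pattern; a stand-alone C11 model (names invented, not driver code):

#include <stdatomic.h>
#include <stdint.h>

#define NUM_ENTRIES 12

struct csb_model {
	_Atomic uint8_t write;		/* producer-updated tail */
	uint64_t entries[NUM_ENTRIES];	/* producer-written events */
};

/*
 * The acquire load stands in for the rmb() above: entry reads must
 * not be speculated ahead of the read of the write pointer.
 */
static void consume(struct csb_model *b, uint8_t *head,
		    void (*handle)(uint64_t))
{
	uint8_t tail = atomic_load_explicit(&b->write, memory_order_acquire);

	while (*head != tail) {
		if (++*head == NUM_ENTRIES)
			*head = 0;
		handle(b->entries[*head]);
	}
}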
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
-{
- lockdep_assert_held(&engine->active.lock);
- if (!READ_ONCE(engine->execlists.pending[0])) {
- rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
- rcu_read_unlock();
- }
+ return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
-static void __execlists_hold(struct i915_request *rq)
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- if (i915_request_is_active(rq))
- __i915_request_unsubmit(rq);
-
- clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- list_move_tail(&rq->sched.link, &rq->engine->active.hold);
- i915_request_set_hold(rq);
- RQ_TRACE(rq, "on hold\n");
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_is_ready(w))
- continue;
-
- if (i915_request_completed(w))
- continue;
-
- if (i915_request_on_hold(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+ int err;
-static bool execlists_hold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- if (i915_request_on_hold(rq))
- return false;
+ GEM_BUG_ON(ce->state);
- spin_lock_irq(&engine->active.lock);
+ vma = __lrc_alloc_state(ce, engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (i915_request_completed(rq)) { /* too late! */
- rq = NULL;
- goto unlock;
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_vma;
}
- if (rq->engine != engine) { /* preempted virtual engine */
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
-
- /*
- * intel_context_inflight() is only protected by virtue
- * of process_csb() being called only by the tasklet (or
- * directly from inside reset while the tasklet is suspended).
- * Assert that neither of those are allowed to run while we
- * poke at the request queues.
- */
- GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+ if (!page_mask_bits(ce->timeline)) {
+ struct intel_timeline *tl;
/*
- * An unsubmitted request along a virtual engine will
- * remain on the active (this) engine until we are able
- * to process the context switch away (and so mark the
- * context as no longer in flight). That cannot have happened
- * yet, otherwise we would not be hanging!
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
*/
- spin_lock(&ve->base.active.lock);
- GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
- GEM_BUG_ON(ve->request != rq);
- ve->request = NULL;
- spin_unlock(&ve->base.active.lock);
- i915_request_put(rq);
-
- rq->engine = engine;
- }
-
- /*
- * Transfer this request onto the hold queue to prevent it
- * being resubmitted to HW (and potentially completed) before we have
- * released it. Since we may have already submitted following
- * requests, we need to remove those as well.
- */
- GEM_BUG_ON(i915_request_on_hold(rq));
- GEM_BUG_ON(rq->engine != engine);
- __execlists_hold(rq);
- GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
- spin_unlock_irq(&engine->active.lock);
- return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
- struct i915_dependency *p;
- bool result = false;
-
- /*
- * If one of our ancestors is on hold, we must also be on hold,
- * otherwise we will bypass it and execute before it.
- */
- rcu_read_lock();
- for_each_signaler(p, rq) {
- const struct i915_request *s =
- container_of(p->signaler, typeof(*s), sched);
-
- if (s->engine != rq->engine)
- continue;
-
- result = i915_request_on_hold(s);
- if (result)
- break;
- }
- rcu_read_unlock();
-
- return result;
-}
-
-static void __execlists_unhold(struct i915_request *rq)
-{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- RQ_TRACE(rq, "hold release\n");
-
- GEM_BUG_ON(!i915_request_on_hold(rq));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
- i915_request_clear_hold(rq);
- list_move_tail(&rq->sched.link,
- i915_sched_lookup_priolist(rq->engine,
- rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Also release any children on this engine that are ready */
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_on_hold(w))
- continue;
-
- /* Check that no other parents are also on hold */
- if (hold_request(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void execlists_unhold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- spin_lock_irq(&engine->active.lock);
-
- /*
- * Move this request back to the priority queue, and all of its
- * children and grandchildren that were suspended along with it.
- */
- __execlists_unhold(rq);
-
- if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
-
- spin_unlock_irq(&engine->active.lock);
-}
-
-struct execlists_capture {
- struct work_struct work;
- struct i915_request *rq;
- struct i915_gpu_coredump *error;
-};
-
-static void execlists_capture_work(struct work_struct *work)
-{
- struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
- struct intel_engine_cs *engine = cap->rq->engine;
- struct intel_gt_coredump *gt = cap->error->gt;
- struct intel_engine_capture_vma *vma;
-
- /* Compress all the objects attached to the request, slow! */
- vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
- if (vma) {
- struct i915_vma_compress *compress =
- i915_vma_capture_prepare(gt);
-
- intel_engine_coredump_add_vma(gt->engine, vma, compress);
- i915_vma_capture_finish(gt, compress);
- }
-
- gt->simulated = gt->engine->simulated;
- cap->error->simulated = gt->simulated;
-
- /* Publish the error state, and announce it to the world */
- i915_error_state_store(cap->error);
- i915_gpu_coredump_put(cap->error);
-
- /* Return this request and all that depend upon it for signaling */
- execlists_unhold(engine, cap->rq);
- i915_request_put(cap->rq);
-
- kfree(cap);
-}
-
-static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
-{
- const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
- struct execlists_capture *cap;
-
- cap = kmalloc(sizeof(*cap), gfp);
- if (!cap)
- return NULL;
-
- cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
- if (!cap->error)
- goto err_cap;
-
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
- if (!cap->error->gt)
- goto err_gpu;
-
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
- if (!cap->error->gt->engine)
- goto err_gt;
-
- cap->error->gt->engine->hung = true;
-
- return cap;
-
-err_gt:
- kfree(cap->error->gt);
-err_gpu:
- kfree(cap->error);
-err_cap:
- kfree(cap);
- return NULL;
-}
-
-static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
-{
- const struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request * const *port, *rq;
-
- /*
- * Use the most recent result from process_csb(), but just in case
- * we trigger an error (via interrupt) before the first CS event has
- * been written, peek at the next submission.
- */
-
- for (port = el->active; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at active:%zd\n",
- port - el->active);
- return rq;
- }
- }
-
- for (port = el->pending; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at pending:%zd\n",
- port - el->pending);
- return rq;
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce, engine);
+ else
+ tl = intel_timeline_create(engine->gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_ring;
}
- }
-
- ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
- return NULL;
-}
-
-static u32 active_ccid(struct intel_engine_cs *engine)
-{
- return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
-}
-static void execlists_capture(struct intel_engine_cs *engine)
-{
- struct execlists_capture *cap;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return;
-
- /*
- * We need to _quickly_ capture the engine state before we reset.
- * We are inside an atomic section (softirq) here and we are delaying
- * the forced preemption event.
- */
- cap = capture_regs(engine);
- if (!cap)
- return;
-
- spin_lock_irq(&engine->active.lock);
- cap->rq = active_context(engine, active_ccid(engine));
- if (cap->rq) {
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
+ ce->timeline = tl;
}
- spin_unlock_irq(&engine->active.lock);
- if (!cap->rq)
- goto err_free;
-
- /*
- * Remove the request from the execlists queue, and take ownership
- * of the request. We pass it to our worker who will _slowly_ compress
- * all the pages the _user_ requested for debugging their batch, after
- * which we return it to the queue for signaling.
- *
- * By removing them from the execlists queue, we also remove the
- * requests from being processed by __unwind_incomplete_requests()
- * during the intel_engine_reset(), and so they will *not* be replayed
- * afterwards.
- *
- * Note that because we have not yet reset the engine at this point,
- * it is possible that the request we have identified as
- * guilty did in fact complete, and we will then hit an arbitration
- * point allowing the outstanding preemption to succeed. The likelihood
- * of that is very low (as capturing of the engine registers should be
- * fast enough to run inside an irq-off atomic section!), so we will
- * simply hold that request accountable for being non-preemptible
- * long enough to force the reset.
- */
- if (!execlists_hold(engine, cap->rq))
- goto err_rq;
-
- INIT_WORK(&cap->work, execlists_capture_work);
- schedule_work(&cap->work);
- return;
-
-err_rq:
- i915_request_put(cap->rq);
-err_free:
- i915_gpu_coredump_put(cap->error);
- kfree(cap);
-}
-
-static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
-{
- const unsigned int bit = I915_RESET_ENGINE + engine->id;
- unsigned long *lock = &engine->gt->reset.flags;
-
- if (!intel_has_reset_engine(engine->gt))
- return;
-
- if (test_and_set_bit(bit, lock))
- return;
-
- ENGINE_TRACE(engine, "reset for %s\n", msg);
- /* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ ce->ring = ring;
+ ce->state = vma;
- ring_set_paused(engine, 1); /* Freeze the current request in place */
- execlists_capture(engine);
- intel_engine_reset(engine, msg);
+ return 0;
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+err_ring:
+ intel_ring_put(ring);
+err_vma:
+ i915_vma_put(vma);
+ return err;
}
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
+void lrc_reset(struct intel_context *ce)
{
- const struct timer_list *t = &engine->execlists.preempt;
-
- if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
- return false;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- if (!timer_expired(t))
- return false;
+ intel_ring_reset(ce->ring, ce->ring->emit);
- return READ_ONCE(engine->execlists.pending[0]);
+ /* Scrub away the garbage */
+ lrc_init_regs(ce, ce->engine, true);
+ ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- bool timeout = preempt_timeout(engine);
-
- process_csb(engine);
-
- if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
- const char *msg;
-
- /* Generate the error message in priority order wrt the user! */
- if (engine->execlists.error_interrupt & GENMASK(15, 0))
- msg = "CS error"; /* thrown by a user payload */
- else if (engine->execlists.error_interrupt & ERROR_CSB)
- msg = "invalid CSB event";
- else
- msg = "internal error";
-
- engine->execlists.error_interrupt = 0;
- execlists_reset(engine, msg);
- }
-
- if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
- unsigned long flags;
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915) |
+ I915_MAP_OVERRIDE);
- /* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine))) {
- cancel_timer(&engine->execlists.preempt);
- execlists_reset(engine, "preemption time out");
- }
- }
+ return PTR_ERR_OR_ZERO(*vaddr);
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
{
- /* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
-}
-
-#define execlists_kick(t, member) \
- __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
-static void execlists_timeslice(struct timer_list *timer)
-{
- execlists_kick(timer, timer);
-}
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ lrc_init_state(ce, engine, vaddr);
-static void execlists_preempt(struct timer_list *timer)
-{
- execlists_kick(timer, preempt);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+ return 0;
}
-static void queue_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void lrc_unpin(struct intel_context *ce)
{
- GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link,
- i915_sched_lookup_priolist(engine, rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+ ce->engine);
}
-static void __submit_queue_imm(struct intel_engine_cs *engine)
+void lrc_post_unpin(struct intel_context *ce)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (reset_in_progress(execlists))
- return; /* defer until we restart the engine following reset */
-
- __execlists_submission_tasklet(engine);
+ i915_gem_object_unpin_map(ce->state->obj);
}
-static void submit_queue(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+void lrc_fini(struct intel_context *ce)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- if (rq_prio(rq) <= execlists->queue_priority_hint)
+ if (!ce->state)
return;
- execlists->queue_priority_hint = rq_prio(rq);
- __submit_queue_imm(engine);
-}
-
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
-{
- GEM_BUG_ON(i915_request_on_hold(rq));
- return !list_empty(&engine->active.hold) && hold_request(rq);
-}
-
-static void flush_csb(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *el = &engine->execlists;
-
- if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
- if (!reset_in_progress(el))
- process_csb(engine);
- tasklet_unlock(&el->tasklet);
- }
-}
-
-static void execlists_submit_request(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
- /* Hopefully we clear execlists->pending[] to let us through */
- flush_csb(engine);
-
- /* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- if (unlikely(ancestor_on_hold(engine, request))) {
- RQ_TRACE(request, "ancestor on hold\n");
- list_add_tail(&request->sched.link, &engine->active.hold);
- i915_request_set_hold(request);
- } else {
- queue_request(engine, request);
-
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
-
- submit_queue(engine, request);
- }
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __execlists_context_fini(struct intel_context *ce)
-{
- intel_ring_put(ce->ring);
- i915_vma_put(ce->state);
+ intel_ring_put(fetch_and_zero(&ce->ring));
+ i915_vma_put(fetch_and_zero(&ce->state));
}
-static void execlists_context_destroy(struct kref *kref)
+void lrc_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->state)
- __execlists_context_fini(ce);
+ lrc_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
-static void
-set_redzone(void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
-}
-
-static void
-check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- drm_err_once(&engine->i915->drm,
- "%s context redzone overwritten!\n",
- engine->name);
-}
-
-static void execlists_context_unpin(struct intel_context *ce)
-{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
- ce->engine);
-}
-
-static void execlists_context_post_unpin(struct intel_context *ce)
-{
- i915_gem_object_unpin_map(ce->state->obj);
-}
-
static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
@@ -3465,7 +1035,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return cs;
}
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
{
return PAGE_SIZE * ce->wa_bb_page;
}
@@ -3496,16 +1066,57 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
- lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
- i915_ggtt_offset(ce->state) +
- context_wa_bb_offset(ce),
- (cs - start) * sizeof(*cs));
+ lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
}
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head)
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48-53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+ u32 desc;
+
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(ce->vm->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ return i915_ggtt_offset(ce->state) | desc;
+}
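
A sketch of how the Gen11+ upper dword described in the comment could be packed; the field positions follow the comment, but the helper itself is hypothetical, not the driver's:

#include <stdint.h>

static uint32_t gen11_desc_hi(uint32_t sw_ctx_id, uint32_t instance,
			      uint32_t sw_counter, uint32_t class)
{
	return (sw_ctx_id & 0x7ff) << (37 - 32) |	/* bits 37-47 */
	       (instance & 0x3f) << (48 - 32) |		/* bits 48-53 */
	       (sw_counter & 0x3f) << (55 - 32) |	/* bits 55-60 */
	       (class & 0x7) << (61 - 32);		/* bits 61-63 */
}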
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -3537,205 +1148,54 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
}
-}
-static int
-execlists_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww, void **vaddr)
-{
- GEM_BUG_ON(!ce->state);
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- *vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
- I915_MAP_OVERRIDE);
-
- return PTR_ERR_OR_ZERO(*vaddr);
-}
-
-static int
-__execlists_context_pin(struct intel_context *ce,
- struct intel_engine_cs *engine,
- void *vaddr)
-{
- ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
- __execlists_update_reg_state(ce, engine, ce->ring->tail);
-
- return 0;
+ return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}
-static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- return __execlists_context_pin(ce, ce->engine, vaddr);
+ set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}
-static int execlists_context_alloc(struct intel_context *ce)
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when)
{
- return __execlists_context_alloc(ce, ce->engine);
-}
-
-static void execlists_context_reset(struct intel_context *ce)
-{
- CE_TRACE(ce, "reset\n");
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- intel_ring_reset(ce->ring, ce->ring->emit);
-
- /* Scrub away the garbage */
- execlists_init_reg_state(ce->lrc_reg_state,
- ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
-
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static const struct intel_context_ops execlists_context_ops = {
- .alloc = execlists_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = execlists_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = intel_context_enter_engine,
- .exit = intel_context_exit_engine,
-
- .reset = execlists_context_reset,
- .destroy = execlists_context_destroy,
-};
-
-static u32 hwsp_offset(const struct i915_request *rq)
-{
- const struct intel_timeline_cacheline *cl;
-
- /* Before the request is executed, the timeline/cacheline is fixed */
-
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
-}
-
-static int gen8_emit_init_breadcrumb(struct i915_request *rq)
-{
- u32 *cs;
-
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
- if (!i915_request_timeline(rq)->has_initial_breadcrumb)
- return 0;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Check if we have been preempted before we even get started.
- *
- * After this point i915_request_started() reports true, even if
- * we get preempted and so are no longer running.
- */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = hwsp_offset(rq);
- *cs++ = 0;
- *cs++ = rq->fence.seqno - 1;
-
- intel_ring_advance(rq, cs);
-
- /* Record the updated position of the request's payload */
- rq->infix = intel_ring_offset(rq, cs);
-
- __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
-
- return 0;
-}
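
The seqno - 1 store above is what lets "started" be derived from a plain seqno comparison; schematically (an illustrative helper, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Started once the HWSP has advanced past seqno - 1 (wrap-safe). */
static bool request_started(uint32_t hwsp_seqno, uint32_t rq_seqno)
{
	return (int32_t)(hwsp_seqno - (rq_seqno - 1)) >= 0;
}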
-
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
}
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int execlists_request_alloc(struct i915_request *request)
-{
- int ret;
-
- GEM_BUG_ON(!intel_context_is_pinned(request->context));
- /*
- * Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- /*
- * Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- if (!i915_vm_is_4lvl(request->context->vm)) {
- ret = emit_pdps(request);
- if (ret)
- return ret;
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
}
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- if (ret)
- return ret;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
+ WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}
/*
@@ -3955,7 +1415,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
@@ -3963,7 +1423,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3985,7 +1445,7 @@ err:
return err;
}
-static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
@@ -3995,23 +1455,24 @@ static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
-static int intel_init_workaround_bb(struct intel_engine_cs *engine)
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
- &wa_ctx->per_ctx };
- wa_bb_func_t wa_bb_fn[2];
+ struct i915_wa_ctx_bb *wa_bb[] = {
+ &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+ };
+ wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
void *batch, *batch_ptr;
unsigned int i;
- int ret;
+ int err;
if (engine->class != RENDER_CLASS)
- return 0;
+ return;
switch (INTEL_GEN(engine->i915)) {
case 12:
case 11:
- return 0;
+ return;
case 10:
wa_bb_fn[0] = gen10_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -4026,14 +1487,20 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
- return 0;
+ return;
}
- ret = lrc_setup_wa_ctx(engine);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to setup context WA page: %d\n", ret);
- return ret;
+ err = lrc_setup_wa_ctx(engine);
+ if (err) {
+ /*
+ * We continue even if we fail to initialize the WA batch
+ * because we only expect rare glitches, but nothing
+ * critical enough to prevent us from using the GPU.
+ */
+ drm_err(&engine->i915->drm,
+ "Ignoring context switch w/a allocation error:%d\n",
+ err);
+ return;
}
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
@@ -4048,2104 +1515,52 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
CACHELINE_BYTES))) {
- ret = -EINVAL;
+ err = -EINVAL;
break;
}
if (wa_bb_fn[i])
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
- GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
- if (ret)
- lrc_destroy_wa_ctx(engine);
-
- return ret;
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
-
- ring_set_paused(engine, 0);
-
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
-
- /* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
-
- /* Once more for luck and our trusty paranoia */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
-}
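
The head trick in the comment above is easy to see in isolation; a toy model (CSB_SIZE chosen arbitrarily for illustration):

#define CSB_SIZE 12

/*
 * After reset the HW writes entry 0 first; priming head (and the
 * faked write pointer) to CSB_SIZE - 1 makes entry 0 the first one
 * the pre-increment consumer picks up.
 */
static unsigned int first_entry_consumed(void)
{
	unsigned int head = CSB_SIZE - 1;	/* reset_value */

	if (++head == CSB_SIZE)
		head = 0;

	return head;	/* 0 */
}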
-
-static void execlists_sanitize(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(execlists_active(&engine->execlists));
-
- /*
- * Poison residual state on resume, in case the suspend didn't!
- *
- * We have to assume that across suspend/resume (or other loss
- * of control) that the contents of our pinned buffers has been
- * lost, replaced by garbage. Since this doesn't always happen,
- * let's poison such state so that we more quickly spot when
- * we falsely assume it has been preserved.
- */
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
-
- reset_csb_pointers(engine);
-
- /*
- * The kernel_context HWSP is stored in the status_page. As above,
- * that may be lost on resume/initialisation, and so we need to
- * reset the value in the HWSP.
- */
- intel_timeline_reset_seqno(engine->kernel_context->timeline);
-
- /* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
-}
-
-static void enable_error_interrupt(struct intel_engine_cs *engine)
-{
- u32 status;
-
- engine->execlists.error_interrupt = 0;
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
-
- status = ENGINE_READ(engine, RING_ESR);
- if (unlikely(status)) {
- drm_err(&engine->i915->drm,
- "engine '%s' resumed still in error: %08x\n",
- engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
- }
-
- /*
- * On current gen8+, we have 2 signals to play with
- *
- * - I915_ERROR_INSTRUCTION (bit 0)
- *
- * Generate an error if the command parser encounters an invalid
- * instruction
- *
- * This is a fatal error.
- *
- * - CP_PRIV (bit 2)
- *
- * Generate an error on privilege violation (where the CP replaces
- * the instruction with a no-op). This also fires for writes into
- * read-only scratch pages.
- *
- * This is a non-fatal error, parsing continues.
- *
- * * there are a few others defined for odd HW that we do not use
- *
- * Since CP_PRIV fires for cases where we have chosen to ignore the
- * error (as the HW is validating and suppressing the mistakes), we
- * only unmask the instruction error bit.
- */
- ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
-}
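
The masking policy the comment describes boils down to unmasking only the fatal bit; a minimal sketch, with made-up macro names standing in for the hardware bits named above:

#include <stdint.h>

#define ERR_INSTRUCTION	(1u << 0)	/* fatal: invalid instruction */
#define ERR_CP_PRIV	(1u << 2)	/* non-fatal: privilege violation */

/* Everything stays masked in EMR except the one fatal error. */
static uint32_t error_mask(void)
{
	return ~ERR_INSTRUCTION;
}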
-
-static void enable_execlists(struct intel_engine_cs *engine)
-{
- u32 mode;
-
- assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
-
- intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-
- if (INTEL_GEN(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
- else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
- ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
-
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
-
- ENGINE_WRITE_FW(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
- ENGINE_POSTING_READ(engine, RING_HWS_PGA);
-
- enable_error_interrupt(engine);
-
- engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-}
-
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
-static int execlists_resume(struct intel_engine_cs *engine)
-{
- intel_mocs_init_engine(engine);
-
- intel_breadcrumbs_reset(engine->breadcrumbs);
-
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
- enable_execlists(engine);
-
- return 0;
-}
-static void execlists_reset_prepare(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->resume() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- */
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
-
- /* And flush any current direct submission. */
- spin_lock_irqsave(&engine->active.lock, flags);
- spin_unlock_irqrestore(&engine->active.lock, flags);
-
- /*
- * We stop the engines, otherwise we might get a failed reset and a
- * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
- * from a system hang if a batchbuffer is progressing when
- * the reset is issued, regardless of the READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- ring_set_paused(engine, 1);
- intel_engine_stop_cs(engine);
-
- engine->execlists.reset_ccid = active_ccid(engine);
-}
-
-static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1) {
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- }
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
-
- __reset_stop_ring(regs, engine);
+ /* Verify that we can handle failure to set up the wa_ctx */
+ if (err || i915_inject_probe_error(engine->i915, -ENODEV))
+ lrc_fini_wa_ctx(engine);
}
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct intel_context *ce;
- struct i915_request *rq;
- u32 head;
-
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
-
- process_csb(engine); /* drain preemption events */
-
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(engine);
-
- /*
- * Save the currently executing context, even if we completed
- * its request, it was still running at the time of the
- * reset and will have been clobbered.
- */
- rq = active_context(engine, engine->execlists.reset_ccid);
- if (!rq)
- goto unwind;
-
- ce = rq->context;
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- if (i915_request_completed(rq)) {
- /* Idle context; tidy up the ring so we can restart afresh */
- head = intel_ring_wrap(ce->ring, rq->tail);
- goto out_replay;
- }
-
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /* Context has requests still in-flight; it should not be idle! */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
-
- rq = active_request(ce->timeline, rq);
- head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(head == ce->ring->tail);
-
- /*
- * If this request hasn't started yet, e.g. it is waiting on a
- * semaphore, we need to avoid skipping the request or else we
- * break the signaling chain. However, if the context is corrupt
- * the request will not restart and we will be stuck with a wedged
- * device. It is quite often the case that if we issue a reset
- * while the GPU is loading the context image, the context
- * image becomes corrupt.
- *
- * Otherwise, if we have not started yet, the request should replay
- * perfectly and we do not need to flag the result as being erroneous.
- */
- if (!i915_request_started(rq))
- goto out_replay;
-
- /*
- * If the request was innocent, we leave the request in the ELSP
- * and will try to replay it on restarting. The context image may
- * have been corrupted by the reset, in which case we may have
- * to service a new GPU hang, but more likely we can continue on
- * without impact.
- *
- * If the request was guilty, we presume the context is corrupt
- * and have to at least restore the RING register in the context
- * image back to the expected values to skip over the guilty request.
- */
- __i915_request_reset(rq, stalled);
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
-out_replay:
- ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- head, ce->ring->tail);
- __execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-
-unwind:
- /* Push back any incomplete requests for replay after the reset. */
- cancel_port_requests(execlists);
- __unwind_incomplete_requests(engine);
-}
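
The head selection above condenses to a one-liner; a stand-alone summary with illustrative types (intel_ring_wrap() reduces to masking by the ring size):

#include <stdbool.h>
#include <stdint.h>

struct rq_model { uint32_t head, tail; bool completed; };

/*
 * An idle context restarts cleanly after the last completed request
 * (its tail); a hung one restarts at the head of the request being
 * replayed.
 */
static uint32_t replay_head(const struct rq_model *rq, uint32_t ring_mask)
{
	return (rq->completed ? rq->tail : rq->head) & ring_mask;
}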
-
-static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
-{
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, stalled);
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-
- /* The driver is wedged; don't process any more events. */
- WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
-}
-
-static void execlists_reset_cancel(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, true);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
- intel_engine_signal_breadcrumbs(engine);
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- mark_eio(rq);
- __i915_request_submit(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &engine->active.hold, sched.link)
- mark_eio(rq);
-
- /* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- spin_lock(&ve->base.active.lock);
- rq = fetch_and_zero(&ve->request);
- if (rq) {
- mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
- i915_request_put(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- }
- spin_unlock(&ve->base.active.lock);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void execlists_reset_finish(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- /*
- * After a GPU reset, we may have requests to replay. Do so now while
- * we still have the forcewake to be sure that the GPU is not allowed
- * to sleep before we restart and reload a context.
- */
- GEM_BUG_ON(!reset_in_progress(execlists));
- if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
- execlists->tasklet.func(execlists->tasklet.data);
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
-}
-
-static int gen8_emit_bb_start_noarb(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * WaDisableCtxRestoreArbitration:bdw,chv
- *
- * We don't need to perform MI_ARB_ENABLE as often as we do (in
- * particular on all the gens that do not need the w/a at all!); if we
- * took care to make sure that on every switch into this context
- * (both ordinary and for preemption) arbitration was enabled,
- * we would be fine. However, for gen8 there is another w/a that
- * requires us to not preempt inside GPGPU execution, so we keep
- * arbitration disabled for gen8 batches. Arbitration will be
- * re-enabled before we close the request
- * (engine->emit_fini_breadcrumb).
- */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- /* FIXME(BDW+): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
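-	/*
-	 * Enable arbitration across the batch so it can be preempted;
-	 * it is disabled again below and re-enabled by the fini breadcrumb.
-	 */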
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- ENGINE_POSTING_READ(engine, RING_IMR);
-}
-
-static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-}
-
-static int gen8_emit_flush(struct i915_request *request, u32 mode)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(request, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
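-	/*
-	 * The +1 extends the MI_FLUSH_DW length field to cover the
-	 * post-sync address/value dwords emitted below.
-	 */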
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen8_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- bool vf_flush_wa = false, dc_flush_wa = false;
- u32 *cs, flags = 0;
- int len;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
-
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- /*
- * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
- * pipe control.
- */
- if (IS_GEN(request->engine->i915, 9))
- vf_flush_wa = true;
-
- /* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
- dc_flush_wa = true;
- }
-
- len = 6;
-
- if (vf_flush_wa)
- len += 6;
-
- if (dc_flush_wa)
- len += 12;
-
- cs = intel_ring_begin(request, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (vf_flush_wa)
- cs = gen8_emit_pipe_control(cs, 0, 0);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen11_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static u32 preparser_disable(bool state)
-{
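-	/*
-	 * Bit 8 selects the gen12 pre-parser toggle encoding of
-	 * MI_ARB_CHECK; bit 0 carries the disable state.
-	 */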
- return MI_ARB_CHECK | 1 << 8 | state;
-}
-
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
-{
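-	/*
-	 * Each video decode/enhancement instance has its own AUX table
-	 * invalidation register; see hsdes: 1809175790.
-	 */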
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv_reg\n");
-
- return INVALID_MMIO_REG;
-}
-
-static u32 *
-gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(inv_reg);
- *cs++ = AUX_INV;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static int gen12_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_FLUSH_L3;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /* Wa_1409600907:tgl */
- flags |= PIPE_CONTROL_DEPTH_STALL;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen12_emit_pipe_control(cs,
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 8 + 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Prevent the pre-parser from skipping past the TLB
- * invalidate and loading a stale page for the batch
- * buffer / request payload.
- */
- *cs++ = preparser_disable(true);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
-
- *cs++ = preparser_disable(false);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static int gen12_emit_flush(struct i915_request *request, u32 mode)
-{
- intel_engine_mask_t aux_inv = 0;
- u32 cmd, *cs;
-
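-	/*
-	 * cmd is first used as the dword count: 4 for the flush itself,
-	 * +2 for the preparser toggles, plus an LRI for any aux invalidation.
-	 */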
- cmd = 4;
- if (mode & EMIT_INVALIDATE)
- cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = request->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight8(aux_inv) + 2;
-
- cs = intel_ring_begin(request, cmd);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(true);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
-
- if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
- for_each_engine_masked(engine, request->engine->gt,
- aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
- }
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(false);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static void assert_request_valid(struct i915_request *rq)
-{
- struct intel_ring *ring __maybe_unused = rq->ring;
-
- /* Can we unwind this request without appearing to go forwards? */
- GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
-}
-
-/*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
-static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
-{
- /* Ensure there's always at least one preemption point per-request. */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request, cs);
-
- /* Check that entire request is less than half the ring */
- assert_request_valid(request);
-
- return cs;
-}
-
-static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
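-	/*
-	 * Busy-spin on the preempt semaphore in the HWSP until it is
-	 * released; this keeps the context busy for preempt-to-busy.
-	 */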
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
-
- return cs;
-}
-
-static __always_inline u32 *
-gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
-}
-
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
-}
-
-static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static u32 *
-gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-/*
- * Note that the CS instruction pre-parser will not stall on the breadcrumb
- * flush and will continue pre-fetching the instructions after it before the
- * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
- * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
- * of the next request before the memory has been flushed, we're guaranteed that
- * we won't access the batch itself too early.
- * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
- * so, if the current request is modifying an instruction in the next request on
- * the same intel_context, we might pre-fetch and then execute the pre-update
- * instruction. To avoid this, the users of self-modifying code should either
- * disable the parser around the code emitting the memory writes, via a new flag
- * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
- * the in-kernel use-cases we've opted to use a separate context, see
- * reloc_gpu() as an example.
- * All the above applies only to the instructions themselves. Non-inline data
- * used by the instructions is not pre-fetched.
- */
-
-static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static __always_inline u32 *
-gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = gen12_emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-	/* XXX Stalling flush before the seqno write; no post-sync op */
- cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
- return gen12_emit_fini_breadcrumb_tail(rq, cs);
-}
-
-static u32 *
-gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen12_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen12_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static void execlists_park(struct intel_engine_cs *engine)
-{
- cancel_timer(&engine->execlists.timer);
- cancel_timer(&engine->execlists.preempt);
-}
-
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->submit_request = execlists_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (INTEL_GEN(engine->i915) >= 12)
- engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
-}
-
-static void execlists_shutdown(struct intel_engine_cs *engine)
-{
- /* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
-}
-
-static void execlists_release(struct intel_engine_cs *engine)
-{
- engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
-
- execlists_shutdown(engine);
-
- intel_engine_cleanup_common(engine);
- lrc_destroy_wa_ctx(engine);
-}
-
-static void
-logical_ring_default_vfuncs(struct intel_engine_cs *engine)
-{
-	/* Default vfuncs which can be overridden by each engine. */
-
- engine->resume = execlists_resume;
-
- engine->cops = &execlists_context_ops;
- engine->request_alloc = execlists_request_alloc;
-
- engine->emit_flush = gen8_emit_flush;
- engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12) {
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
- engine->emit_flush = gen12_emit_flush;
- }
- engine->set_default_submission = intel_execlists_set_default_submission;
-
- if (INTEL_GEN(engine->i915) < 11) {
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
- } else {
- /*
- * TODO: On Gen11 interrupt masks need to be clear
- * to allow C6 entry. Keep interrupts enabled
- * and take the hit of generating extra interrupts
- * until a more refined solution exists.
- */
- }
-}
-
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
-{
- unsigned int shift = 0;
-
- if (INTEL_GEN(engine->i915) < 11) {
- const u8 irq_shifts[] = {
- [RCS0] = GEN8_RCS_IRQ_SHIFT,
- [BCS0] = GEN8_BCS_IRQ_SHIFT,
- [VCS0] = GEN8_VCS0_IRQ_SHIFT,
- [VCS1] = GEN8_VCS1_IRQ_SHIFT,
- [VECS0] = GEN8_VECS_IRQ_SHIFT,
- };
-
- shift = irq_shifts[engine->id];
- }
-
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
-}
-
-static void rcs_submission_override(struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- case 12:
- engine->emit_flush = gen12_emit_flush_render;
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
- break;
- case 11:
- engine->emit_flush = gen11_emit_flush_render;
- engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
- break;
- default:
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- break;
- }
-}
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
-
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine);
-
- if (engine->class == RENDER_CLASS)
- rcs_submission_override(engine);
-
- if (intel_init_workaround_bb(engine))
- /*
- * We continue even if we fail to initialize WA batch
- * because we only expect rare glitches but nothing
- * critical to prevent us from using GPU
- */
- drm_err(&i915->drm, "WA batch buffer initialization failed\n");
-
- if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
- } else {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_ELSP(base));
- }
-
- execlists->csb_status =
- (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
- execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
- if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
- else
- execlists->csb_size = GEN11_CSB_ENTRIES;
-
- if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
- }
-
- /* Finally, take ownership and responsibility for cleanup! */
- engine->sanitize = execlists_sanitize;
- engine->release = execlists_release;
-
- return 0;
-}
-
-static void init_common_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- u32 ctl;
-
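-	/*
-	 * Masked-bit register: always inhibit syn context switches and,
-	 * unless this image must not be restored, clear restore-inhibit.
-	 */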
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (inhibit)
- ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
- if (INTEL_GEN(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
- regs[CTX_CONTEXT_CONTROL] = ctl;
-
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_TIMESTAMP] = 0;
-}
-
-static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine)
-{
- const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
-
- if (wa_ctx->per_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
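-		/* Bit 0 marks the per-context WA batch buffer pointer as valid. */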
- regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
-
- if (wa_ctx->indirect_ctx.size) {
- lrc_ring_setup_indirect_ctx(regs, engine,
- i915_ggtt_offset(wa_ctx->vma) +
- wa_ctx->indirect_ctx.offset,
- wa_ctx->indirect_ctx.size);
- }
-}
-
-static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
-{
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- /* 64b PPGTT (48bit canonical)
- * PDP0_DESCRIPTOR contains the base address to PML4 and
- * other PDP Descriptors are ignored.
- */
- ASSIGN_CTX_PML4(ppgtt, regs);
- } else {
- ASSIGN_CTX_PDP(ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ppgtt, regs, 0);
- }
-}
-
-static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
-{
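-	/*
-	 * The GGTT has no PPGTT of its own; use its aliasing PPGTT for
-	 * the context's page-directory state instead.
-	 */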
- if (i915_is_ggtt(vm))
- return i915_vm_to_ggtt(vm)->alias;
- else
- return i915_vm_to_ppgtt(vm);
-}
-
-static void execlists_init_reg_state(u32 *regs,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- set_offsets(regs, reg_offsets(engine), engine, inhibit);
-
- init_common_reg_state(regs, engine, ring, inhibit);
- init_ppgtt_reg_state(regs, vm_alias(ce->vm));
-
- init_wa_bb_reg_state(regs, engine);
-
- __reset_stop_ring(regs, engine);
-}
-
-static int
-populate_lr_context(struct intel_context *ce,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- bool inhibit = true;
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
- return PTR_ERR(vaddr);
- }
-
- set_redzone(vaddr, engine);
-
- if (engine->default_state) {
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
- inhibit = false;
- }
-
- /* Clear the ppHWSP (inc. per-context counters) */
- memset(vaddr, 0, PAGE_SIZE);
-
- /*
- * The second page of the context object contains some registers which
- * must be set up prior to the first execution.
- */
- execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
- ce, engine, ring, inhibit);
-
- __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
- i915_gem_object_unpin_map(ctx_obj);
- return 0;
-}
-
-static struct intel_timeline *pinned_timeline(struct intel_context *ce)
-{
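-	/*
-	 * Here ce->timeline holds no real pointer, only a HWSP offset in
-	 * its low bits; create a timeline pinned to that offset.
-	 */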
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-
- return intel_timeline_create_from_engine(ce->engine,
- page_unmask_bits(tl));
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj;
- struct intel_ring *ring;
- struct i915_vma *vma;
- u32 context_size;
- int ret;
-
- GEM_BUG_ON(ce->state);
- context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- context_size += I915_GTT_PAGE_SIZE; /* for redzone */
-
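-	/*
-	 * Gen12 appends one extra page to the context image for a
-	 * per-context WA batch buffer.
-	 */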
- if (INTEL_GEN(engine->i915) == 12) {
- ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
- }
-
- ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
- if (IS_ERR(ctx_obj))
- return PTR_ERR(ctx_obj);
-
- vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto error_deref_obj;
- }
-
- if (!page_mask_bits(ce->timeline)) {
- struct intel_timeline *tl;
-
- /*
- * Use the static global HWSP for the kernel context, and
- * a dynamically allocated cacheline for everyone else.
- */
- if (unlikely(ce->timeline))
- tl = pinned_timeline(ce);
- else
- tl = intel_timeline_create(engine->gt);
- if (IS_ERR(tl)) {
- ret = PTR_ERR(tl);
- goto error_deref_obj;
- }
-
- ce->timeline = tl;
- }
-
- ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error_deref_obj;
- }
-
- ret = populate_lr_context(ce, ctx_obj, engine, ring);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
- }
-
- ce->ring = ring;
- ce->state = vma;
-
- return 0;
-
-error_ring_free:
- intel_ring_put(ring);
-error_deref_obj:
- i915_gem_object_put(ctx_obj);
- return ret;
-}
-
-static struct list_head *virtual_queue(struct virtual_engine *ve)
-{
- return &ve->base.execlists.default_priolist.requests[0];
-}
-
-static void rcu_virtual_context_destroy(struct work_struct *wrk)
-{
- struct virtual_engine *ve =
- container_of(wrk, typeof(*ve), rcu.work);
- unsigned int n;
-
- GEM_BUG_ON(ve->context.inflight);
-
- /* Preempt-to-busy may leave a stale request behind. */
- if (unlikely(ve->request)) {
- struct i915_request *old;
-
- spin_lock_irq(&ve->base.active.lock);
-
- old = fetch_and_zero(&ve->request);
- if (old) {
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- spin_unlock_irq(&ve->base.active.lock);
- }
-
- /*
- * Flush the tasklet in case it is still running on another core.
- *
- * This needs to be done before we remove ourselves from the siblings'
- * rbtrees as in the case it is running in parallel, it may reinsert
- * the rb_node into a sibling.
- */
- tasklet_kill(&ve->base.execlists.tasklet);
-
- /* Decouple ourselves from the siblings, no more access allowed. */
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
- struct rb_node *node = &ve->nodes[sibling->id].rb;
-
- if (RB_EMPTY_NODE(node))
- continue;
-
- spin_lock_irq(&sibling->active.lock);
-
- /* Detachment is lazily performed in the execlists tasklet */
- if (!RB_EMPTY_NODE(node))
- rb_erase_cached(node, &sibling->execlists.virtual);
-
- spin_unlock_irq(&sibling->active.lock);
- }
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-
- if (ve->context.state)
- __execlists_context_fini(&ve->context);
- intel_context_fini(&ve->context);
-
- intel_breadcrumbs_free(ve->base.breadcrumbs);
- intel_engine_free_request_pool(&ve->base);
-
- kfree(ve->bonds);
- kfree(ve);
-}
-
-static void virtual_context_destroy(struct kref *kref)
-{
- struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
-
- GEM_BUG_ON(!list_empty(&ve->context.signals));
-
- /*
- * When destroying the virtual engine, we have to be aware that
- * it may still be in use from a hardirq/softirq context causing
- * the resubmission of a completed request (background completion
- * due to preempt-to-busy). Before we can free the engine, we need
- * to flush the submission code and tasklets that are still potentially
- * accessing the engine. Flushing the tasklets requires process context,
- * and since we can guard the resubmit onto the engine with an RCU read
- * lock, we can delegate the free of the engine to an RCU worker.
- */
- INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
- queue_rcu_work(system_wq, &ve->rcu);
-}
-
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
-{
- int swp;
-
- /*
- * Pick a random sibling on starting to help spread the load around.
- *
- * New contexts are typically created with exactly the same order
- * of siblings, and often started in batches. Due to the way we iterate
- * the array of sibling when submitting requests, sibling[0] is
- * prioritised for dequeuing. If we make sure that sibling[0] is fairly
- * randomised across the system, we also help spread the load by the
- * first engine we inspect being different each time.
- *
- * NB This does not force us to execute on this engine, it will just
- * typically be the first we inspect for submission.
- */
- swp = prandom_u32_max(ve->num_siblings);
- if (swp)
- swap(ve->siblings[swp], ve->siblings[0]);
-}
-
-static int virtual_context_alloc(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- return __execlists_context_alloc(ce, ve->siblings[0]);
-}
-
-static int virtual_context_pin(struct intel_context *ce, void *vaddr)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- /* Note: we must use a real engine class for setting up reg state */
- return __execlists_context_pin(ce, ve->siblings[0], vaddr);
-}
-
-static void virtual_context_enter(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_get(ve->siblings[n]);
-
- intel_timeline_enter(ce->timeline);
-}
-
-static void virtual_context_exit(struct intel_context *ce)
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- intel_timeline_exit(ce->timeline);
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_put(ve->siblings[n]);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
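+	/* Selftest-only: count underflows and remember the largest backwards jump. */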
+ ce->runtime.num_underflow++;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
}
-static const struct intel_context_ops virtual_context_ops = {
- .alloc = virtual_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = virtual_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = virtual_context_enter,
- .exit = virtual_context_exit,
-
- .destroy = virtual_context_destroy,
-};
-
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+void lrc_update_runtime(struct intel_context *ce)
{
- struct i915_request *rq;
- intel_engine_mask_t mask;
-
- rq = READ_ONCE(ve->request);
- if (!rq)
- return 0;
-
- /* The rq is ready for submission; rq->execution_mask is now stable. */
- mask = rq->execution_mask;
- if (unlikely(!mask)) {
- /* Invalid selection, submit to a random engine in error */
- i915_request_set_error_once(rq, -ENODEV);
- mask = ve->siblings[0]->mask;
- }
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
-
- return mask;
-}
+ u32 old;
+ s32 dt;
-static void virtual_submission_tasklet(unsigned long data)
-{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
- intel_engine_mask_t mask;
- unsigned int n;
-
- rcu_read_lock();
- mask = virtual_submission_mask(ve);
- rcu_read_unlock();
- if (unlikely(!mask))
+ if (intel_context_is_barrier(ce))
return;
- local_irq_disable();
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
- struct ve_node * const node = &ve->nodes[sibling->id];
- struct rb_node **parent, *rb;
- bool first;
-
- if (!READ_ONCE(ve->request))
- break; /* already handled by a sibling's tasklet */
-
- if (unlikely(!(mask & sibling->mask))) {
- if (!RB_EMPTY_NODE(&node->rb)) {
- spin_lock(&sibling->active.lock);
- rb_erase_cached(&node->rb,
- &sibling->execlists.virtual);
- RB_CLEAR_NODE(&node->rb);
- spin_unlock(&sibling->active.lock);
- }
- continue;
- }
-
- spin_lock(&sibling->active.lock);
-
- if (!RB_EMPTY_NODE(&node->rb)) {
- /*
- * Cheat and avoid rebalancing the tree if we can
- * reuse this node in situ.
- */
- first = rb_first_cached(&sibling->execlists.virtual) ==
- &node->rb;
- if (prio == node->prio || (prio > node->prio && first))
- goto submit_engine;
-
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
- }
-
- rb = NULL;
- first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
- while (*parent) {
- struct ve_node *other;
-
- rb = *parent;
- other = rb_entry(rb, typeof(*other), rb);
- if (prio > other->prio) {
- parent = &rb->rb_left;
- } else {
- parent = &rb->rb_right;
- first = false;
- }
- }
-
- rb_link_node(&node->rb, rb, parent);
- rb_insert_color_cached(&node->rb,
- &sibling->execlists.virtual,
- first);
-
-submit_engine:
- GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
- node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
-
- spin_unlock(&sibling->active.lock);
- }
- local_irq_enable();
-}
-
-static void virtual_submit_request(struct i915_request *rq)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- struct i915_request *old;
- unsigned long flags;
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
- rq->fence.context,
- rq->fence.seqno);
-
- GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
-
- spin_lock_irqsave(&ve->base.active.lock, flags);
-
- old = ve->request;
- if (old) { /* background completion event from preempt-to-busy */
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- if (i915_request_completed(rq)) {
- __i915_request_submit(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
- } else {
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- ve->request = i915_request_get(rq);
-
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- list_move_tail(&rq->sched.link, virtual_queue(ve));
-
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
- }
-
- spin_unlock_irqrestore(&ve->base.active.lock, flags);
-}
-
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
- const struct intel_engine_cs *master)
-{
- int i;
-
- for (i = 0; i < ve->num_bonds; i++) {
- if (ve->bonds[i].master == master)
- return &ve->bonds[i];
- }
-
- return NULL;
-}
-
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- intel_engine_mask_t allowed, exec;
- struct ve_bond *bond;
-
- allowed = ~to_request(signal)->engine->mask;
-
- bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond)
- allowed &= bond->sibling_mask;
-
- /* Restrict the bonded request to run on only the available engines */
- exec = READ_ONCE(rq->execution_mask);
- while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
- ;
-
- /* Prevent the master from being re-run on the bonded engines */
- to_request(signal)->execution_mask &= ~allowed;
-}
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
-{
- struct virtual_engine *ve;
- unsigned int n;
- int err;
-
- if (count == 0)
- return ERR_PTR(-EINVAL);
-
- if (count == 1)
- return intel_context_create(siblings[0]);
-
- ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
- if (!ve)
- return ERR_PTR(-ENOMEM);
-
- ve->base.i915 = siblings[0]->i915;
- ve->base.gt = siblings[0]->gt;
- ve->base.uncore = siblings[0]->uncore;
- ve->base.id = -1;
-
- ve->base.class = OTHER_CLASS;
- ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
- ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
-
- /*
- * The decision on whether to submit a request using semaphores
- * depends on the saturated state of the engine. We only compute
- * this during HW submission of the request, and we need for this
- * state to be globally applied to all requests being submitted
- * to this engine. Virtual engines encompass more than one physical
- * engine and so we cannot accurately tell in advance if one of those
- * engines is already saturated and so cannot afford to use a semaphore
- * and be pessimized in priority for doing so -- if we are the only
- * context using semaphores after all other clients have stopped, we
- * will be starved on the saturated system. Such a global switch for
- * semaphores is less than ideal, but alas is the current compromise.
- */
- ve->base.saturated = ALL_ENGINES;
-
- snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
-
- intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
- intel_engine_init_execlists(&ve->base);
-
- ve->base.cops = &virtual_context_ops;
- ve->base.request_alloc = execlists_request_alloc;
-
- ve->base.schedule = i915_schedule;
- ve->base.submit_request = virtual_submit_request;
- ve->base.bond_execute = virtual_bond_execute;
-
- INIT_LIST_HEAD(virtual_queue(ve));
- ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
-
- intel_context_init(&ve->context, &ve->base);
-
- ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
- if (!ve->base.breadcrumbs) {
- err = -ENOMEM;
- goto err_put;
- }
-
- for (n = 0; n < count; n++) {
- struct intel_engine_cs *sibling = siblings[n];
-
- GEM_BUG_ON(!is_power_of_2(sibling->mask));
- if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
- err = -EINVAL;
- goto err_put;
- }
-
- /*
- * The virtual engine implementation is tightly coupled to
- * the execlists backend -- we push out requests directly
- * into a tree inside each physical engine. We could support
- * layering if we handle cloning of the requests and
- * submitting a copy into each backend.
- */
- if (sibling->execlists.tasklet.func !=
- execlists_submission_tasklet) {
- err = -ENODEV;
- goto err_put;
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
- RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
-
- ve->siblings[ve->num_siblings++] = sibling;
- ve->base.mask |= sibling->mask;
-
- /*
- * All physical engines must be compatible for their emission
- * functions (as we build the instructions during request
- * construction and do not alter them before submission
- * on the physical engine). We use the engine class as a guide
- * here, although that could be refined.
- */
- if (ve->base.class != OTHER_CLASS) {
- if (ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
- err = -EINVAL;
- goto err_put;
- }
- continue;
- }
-
- ve->base.class = sibling->class;
- ve->base.uabi_class = sibling->uabi_class;
- snprintf(ve->base.name, sizeof(ve->base.name),
- "v%dx%d", ve->base.class, count);
- ve->base.context_size = sibling->context_size;
-
- ve->base.emit_bb_start = sibling->emit_bb_start;
- ve->base.emit_flush = sibling->emit_flush;
- ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
- ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
- ve->base.emit_fini_breadcrumb_dw =
- sibling->emit_fini_breadcrumb_dw;
-
- ve->base.flags = sibling->flags;
- }
-
- ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
-
- virtual_engine_initial_hint(ve);
- return &ve->context;
-
-err_put:
- intel_context_put(&ve->context);
- return ERR_PTR(err);
-}
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
- struct virtual_engine *se = to_virtual_engine(src);
- struct intel_context *dst;
-
- dst = intel_execlists_create_virtual(se->siblings,
- se->num_siblings);
- if (IS_ERR(dst))
- return dst;
-
- if (se->num_bonds) {
- struct virtual_engine *de = to_virtual_engine(dst->engine);
-
- de->bonds = kmemdup(se->bonds,
- sizeof(*se->bonds) * se->num_bonds,
- GFP_KERNEL);
- if (!de->bonds) {
- intel_context_put(dst);
- return ERR_PTR(-ENOMEM);
- }
-
- de->num_bonds = se->num_bonds;
- }
-
- return dst;
-}
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling)
-{
- struct virtual_engine *ve = to_virtual_engine(engine);
- struct ve_bond *bond;
- int n;
-
- /* Sanity check the sibling is part of the virtual engine */
- for (n = 0; n < ve->num_siblings; n++)
- if (sibling == ve->siblings[n])
- break;
- if (n == ve->num_siblings)
- return -EINVAL;
-
- bond = virtual_find_bond(ve, master);
- if (bond) {
- bond->sibling_mask |= sibling->mask;
- return 0;
- }
-
- bond = krealloc(ve->bonds,
- sizeof(*bond) * (ve->num_bonds + 1),
- GFP_KERNEL);
- if (!bond)
- return -ENOMEM;
-
- bond[ve->num_bonds].master = master;
- bond[ve->num_bonds].sibling_mask = sibling->mask;
-
- ve->bonds = bond;
- ve->num_bonds++;
-
- return 0;
-}
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- struct i915_request *rq, *last;
- unsigned long flags;
- unsigned int count;
- struct rb_node *rb;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tE ");
- }
-
- if (execlists->switch_priority_hint != INT_MIN)
- drm_printf(m, "\t\tSwitch priority hint: %d\n",
- READ_ONCE(execlists->switch_priority_hint));
- if (execlists->queue_priority_hint != INT_MIN)
- drm_printf(m, "\t\tQueue priority hint: %d\n",
- READ_ONCE(execlists->queue_priority_hint));
-
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tQ ");
- }
+ old = ce->runtime.last;
+ ce->runtime.last = lrc_get_runtime(ce);
+ dt = ce->runtime.last - old;
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (rq) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tV ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d virtual requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tV ");
+ if (unlikely(dt < 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
- if (scrub)
- restore_default_state(ce, engine);
-
- /* Rerun the request; its payload has been neutered (if guilty). */
- __execlists_update_reg_state(ce, engine, head);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- intel_execlists_set_default_submission;
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2d287f25497..7f697845c4cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -1,90 +1,20 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_LRC_H_
-#define _INTEL_LRC_H_
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
#include <linux/types.h>
-struct drm_printer;
+#include "intel_context.h"
+#include "intel_lrc_reg.h"
-struct drm_i915_private;
-struct i915_gem_context;
-struct i915_request;
-struct intel_context;
+struct drm_i915_gem_object;
struct intel_engine_cs;
+struct intel_ring;
-/* Execlists regs */
-#define RING_ELSP(base) _MMIO((base) + 0x230)
-#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
-#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
-#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
-#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
-#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
-#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
-#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
-
-#define EL_CTRL_LOAD (1 << 0)
-
-/* The docs specify that the write pointer wraps around after 5h, "After status
- * is written out to the last available status QW at offset 5h, this pointer
- * wraps to 0."
- *
- * Therefore, one must infer that even though there are 3 bits available, 6 and
- * 7 appear to be reserved.
- */
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x7
-#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
-#define GEN11_CSB_ENTRIES 12
-#define GEN11_CSB_PTR_MASK 0xf
-#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
-#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
-
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
-
-enum {
- INTEL_CONTEXT_SCHEDULE_IN = 0,
- INTEL_CONTEXT_SCHEDULE_OUT,
- INTEL_CONTEXT_SCHEDULE_PREEMPTED,
-};
-
-/* Logical Rings */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-
-/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
@@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SCRATCH 0x34
#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub);
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max);
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling);
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
-#endif /* _INTEL_LRC_H_ */
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3..65fe76738335 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
@@ -52,4 +54,43 @@
#define GEN8_EXECLISTS_STATUS_BUF 0x370
#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+/* Execlists regs */
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index ab6870242e18..8acb84960cd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -24,8 +24,8 @@
#include "intel_engine.h"
#include "intel_gt.h"
+#include "intel_lrc_reg.h"
#include "intel_mocs.h"
-#include "intel_lrc.h"
#include "intel_ring.h"
/* structures required */
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64c..96b85a10ef33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
kfree(pt);
}
-static inline void
+static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc2..35504c97f11d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
return rc6_to_gt(rc)->i915;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 40d8f1a95df6..60393ce5614d 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -95,10 +95,10 @@ region_lmem_init(struct intel_memory_region *mem)
return ret;
}
-const struct intel_memory_region_ops intel_region_lmem_ops = {
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
- .create_object = __i915_gem_lmem_object_create,
+ .init_object = __i915_gem_lmem_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 213def7c7b8a..8ea43e538dab 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -8,8 +8,6 @@
struct drm_i915_private;
-extern const struct intel_memory_region_ops intel_region_lmem_ops;
-
struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b469..ca816ba22197 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,7 +27,8 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-#include "gt/intel_context.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
static const struct intel_renderstate_rodata *
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3654c955e6be..61410cd62927 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
-static void engine_skip_context(struct i915_request *rq)
+static void skip_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
struct intel_context *hung_ctx = rq->context;
- if (!i915_request_is_active(rq))
- return;
+ list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+ if (!i915_request_is_active(rq))
+ return;
- lockdep_assert_held(&engine->active.lock);
- list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->context == hung_ctx) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -152,15 +151,14 @@ static void mark_innocent(struct i915_request *rq)
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
@@ -231,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -239,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -265,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -276,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -305,9 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- drm_dbg(&gt->i915->drm,
- "Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -407,8 +405,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- drm_dbg(&engine->i915->drm,
- "Wait for SFC forced lock ack failed\n");
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -499,6 +496,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
u32 request, mask, ack;
int ret;
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
@@ -754,8 +754,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
+ local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
intel_ggtt_restore_fences(gt->ggtt);
@@ -833,9 +835,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
+ local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
+ local_bh_enable();
reset_finish(gt, awake);
@@ -1105,25 +1109,12 @@ error:
goto finish;
}
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1139,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "", engine->name, ret);
+ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
goto out;
}
@@ -1174,6 +1164,30 @@ out:
return ret;
}
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
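Splitting out __intel_engine_reset_bh() lets callers that reset several engines pay for the bottom-half exclusion once, as intel_gt_handle_error() does below. A condensed, illustrative sketch of that batching pattern:

    static void sketch_reset_engines(struct intel_gt *gt,
                                     intel_engine_mask_t mask)
    {
        struct intel_engine_cs *engine;
        intel_engine_mask_t tmp;

        /* Keep the submission tasklets off this CPU for the whole batch */
        local_bh_disable();
        for_each_engine_masked(engine, gt, mask, tmp)
            __intel_engine_reset_bh(engine, NULL);
        local_bh_enable();
    }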
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1186,7 +1200,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1274,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
* single reset fails.
*/
if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
+ local_bh_enable();
}
if (!engine_mask)
@@ -1380,6 +1396,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
/* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
}
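i915_gem_shrinker_taints_mutex() does not change runtime behaviour; it primes lockdep with a fictitious dependency so that any later attempt to enter fs-reclaim while holding the reset mutex is reported. Conceptually (a sketch of the idea, not the exact helper):

    static void sketch_taint_mutex(struct mutex *mutex)
    {
        if (!IS_ENABLED(CONFIG_LOCKDEP))
            return;

        /* Pretend we once took the mutex inside fs-reclaim... */
        fs_reclaim_acquire(GFP_KERNEL);
        mutex_lock(mutex);
        mutex_unlock(mutex);
        fs_reclaim_release(GFP_KERNEL);
        /* ...so lockdep now flags reclaim-under-this-mutex as a deadlock. */
    }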
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0c..7dbf5cc8a333 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
void __i915_request_reset(struct i915_request *rq, bool guilty);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f0..78d1360caa0f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
*/
#include "gem/i915_gem_object.h"
+
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"
@@ -40,7 +42,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- if (vma->obj->stolen)
+ if (i915_gem_object_is_stolen(vma->obj))
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ecf3a6118a6d..4984ff565424 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -122,31 +122,27 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- intel_uncore_write(engine->uncore, hwsp, offset);
- intel_uncore_posting_read(engine->uncore, hwsp);
+ intel_uncore_write_fw(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ if (!IS_GEN_RANGE(engine->i915, 6, 7))
return;
/* ring should be idle before issuing a sync flush */
- drm_WARN_ON(&dev_priv->drm,
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- ENGINE_WRITE(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(engine->uncore,
- RING_INSTPM(engine->mmio_base),
- INSTPM_SYNC_FLUSH, 0,
- 1000))
- drm_err(&dev_priv->drm,
- "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE_FW(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
+ 2000, 0, NULL))
+ ENGINE_TRACE(engine,
+ "wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -157,44 +153,6 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
flush_cs_tlb(engine);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (INTEL_GEN(dev_priv) > 2) {
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(engine->uncore,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE,
- MODE_IDLE,
- 1000)) {
- drm_err(&dev_priv->drm,
- "%s : timed out trying to stop ring\n",
- engine->name);
-
- /*
- * Sometimes we observe that the idle flag is not
- * set even though the ring is empty. So double
- * check before giving up.
- */
- if (ENGINE_READ(engine, RING_HEAD) !=
- ENGINE_READ(engine, RING_TAIL))
- return false;
- }
- }
-
- ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
-
- ENGINE_WRITE(engine, RING_HEAD, 0);
- ENGINE_WRITE(engine, RING_TAIL, 0);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE(engine, RING_CTL, 0);
-
- return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
@@ -212,9 +170,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
- if (vm) {
- ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
- ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+ if (!vm)
+ return;
+
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+ if (INTEL_GEN(engine->i915) >= 7) {
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -222,38 +187,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
- int ret = 0;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (!stop_ring(engine)) {
- /* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&dev_priv->drm, "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- drm_err(&dev_priv->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
- ret = -EIO;
- goto out;
- }
- }
-
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
@@ -270,7 +207,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
* also enforces ordering), otherwise the hw might lose the new ring
* register values.
*/
- ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -280,53 +217,98 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
/* First wake the ring up to an empty/idle ring */
- ENGINE_WRITE(engine, RING_HEAD, ring->head);
- ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
- ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE_FW(engine, RING_CTL,
+ RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(engine->uncore,
- RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
- drm_err(&dev_priv->drm, "%s initialization failed "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- ret = -EIO;
- goto out;
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 5000, 0, NULL)) {
+ drm_err(&dev_priv->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
if (INTEL_GEN(dev_priv) > 2)
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
-out:
- intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+ return 0;
+}
- return ret;
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
}
-static void reset_prepare(struct intel_engine_cs *engine)
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
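The new sanitize hook is invoked on the resume path before the engine state is trusted again; a sketch of how a caller sequences it against xcs_resume() (the real call site lives in the engine/gt power-management code):

    static int sketch_resume_engine(struct intel_engine_cs *engine)
    {
        /* Scrub stale HWSP/seqno state before believing any of it */
        if (engine->sanitize)
            engine->sanitize(engine);

        /* xcs_resume(): reprogram HEAD/TAIL/CTL and wake the ring */
        return engine->resume(engine);
    }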
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+static void reset_prepare(struct intel_engine_cs *engine)
+{
/*
* We stop engines, otherwise we might get failed reset and a
* dead gpu (on elk). Also a gpu as modern as kbl can suffer
@@ -338,30 +320,35 @@ static void reset_prepare(struct intel_engine_cs *engine)
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ * WaClearRingBufHeadRegAtInit:ctg,elk
*
* FIXME: Wa for more modern gens needs to be validated
*/
ENGINE_TRACE(engine, "\n");
+ intel_engine_stop_cs(engine);
- if (intel_engine_stop_cs(engine))
- ENGINE_TRACE(engine, "timed out on STOP_RING\n");
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+ if (!stop_ring(engine)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ drm_dbg(&engine->i915->drm,
+ "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- ENGINE_TRACE(engine, "ring head [%x] not parked\n",
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -372,12 +359,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
rq = NULL;
spin_lock_irqsave(&engine->active.lock, flags);
+ rcu_read_lock();
list_for_each_entry(pos, &engine->active.requests, sched.link) {
- if (!i915_request_completed(pos)) {
+ if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
}
}
+ rcu_read_unlock();
/*
* The guilty request will get skipped on a hung engine.
@@ -441,10 +430,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->active.requests, sched.link) {
- i915_request_set_error_once(request, -EIO);
- i915_request_mark_complete(request);
- }
+ list_for_each_entry(request, &engine->active.requests, sched.link)
+ i915_request_mark_eio(request);
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -603,6 +590,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused)
static void ring_context_reset(struct intel_context *ce)
{
intel_ring_reset(ce->ring, ce->ring->emit);
+ clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static const struct intel_context_ops ring_context_ops = {
@@ -654,9 +642,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq,
- struct intel_context *ce,
- u32 flags)
+static int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
@@ -1071,6 +1059,8 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
engine->resume = xcs_resume;
+ engine->sanitize = xcs_sanitize;
+
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b629eeb14002..ee5835c29c03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
@@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps,
return val;
}
-static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
+ return -EBUSY; /* still busy with another command */
}
/* Invert the frequency bin into an ips delay */
@@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- return true;
+ return 0;
+}
+
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ spin_lock_irq(&mchdev_lock);
+ err = __gen5_rps_set(rps, val);
+ spin_unlock_irq(&mchdev_lock);
+
+ return err;
}
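The double-underscore split follows the usual kernel convention: __gen5_rps_set() assumes mchdev_lock is already held, while gen5_rps_set() is the self-locking wrapper for external callers. A sketch of a caller already inside the lock, which must use the bare variant to avoid self-deadlock:

    static void sketch_change_freq_locked(struct intel_rps *rps, u8 val)
    {
        lockdep_assert_held(&mchdev_lock);

        if (__gen5_rps_set(rps, val) == 0)
            rps->cur_freq = val;
    }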
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
"stuck trying to change perf mode\n");
mdelay(1);
- gen5_rps_set(rps, rps->cur_freq);
+ __gen5_rps_set(rps, rps->cur_freq);
rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
@@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps)
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
/* Go back to the starting frequency */
- gen5_rps_set(rps, rps->idle_freq);
+ __gen5_rps_set(rps, rps->idle_freq);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
@@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
if (val == rps->last_freq)
return 0;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
- else
+ else if (INTEL_GEN(i915) >= 6)
err = gen6_rps_set(rps, val);
+ else
+ err = gen5_rps_set(rps, val);
if (err)
return err;
- if (update)
+ if (update && INTEL_GEN(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
@@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps)
{
int adj;
+ GEM_BUG_ON(atomic_read(&rps->num_waiters));
+
if (!intel_rps_clear_active(rps))
return;
@@ -907,28 +919,27 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
- unsigned long flags;
-
- if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
+ if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
/* Serializes with i915_request_retire() */
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
+ if (atomic_fetch_inc(&rps->num_waiters))
+ return;
+
+ if (!intel_rps_is_active(rps))
+ return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
- if (!atomic_fetch_inc(&rps->num_waiters) &&
- READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(&rps->boosts);
+ WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
- spin_unlock_irqrestore(&rq->lock, flags);
}
int intel_rps_set(struct intel_rps *rps, u8 val)
@@ -1798,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps)
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
rps->cur_freq = new_freq;
spin_unlock(&mchdev_lock);
@@ -2109,7 +2120,7 @@ bool i915_gpu_turbo_disable(void)
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 38083f0402d9..029fe13cf303 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -93,7 +93,7 @@ struct intel_rps {
} power;
atomic_t num_waiters;
- atomic_t boosts;
+ unsigned int boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 8015964043eb..037b0e3ccbed 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -319,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
return timeline;
}
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
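Hanging the timeline off engine->status_page.timelines is what lets sanitize_hwsp() (see the ring submission changes above) find and reset every seqno stored in that page on resume. Typical init-time usage, with an illustrative status-page offset:

    static struct intel_timeline *
    sketch_engine_timeline(struct intel_engine_cs *engine)
    {
        /* Carve this timeline's seqno slot out of the engine's HWSP */
        return intel_timeline_create_from_engine(engine,
                                                 I915_GEM_HWS_SEQNO_ADDR);
    }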
void __intel_timeline_pin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -563,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_completed(from)) /* confirm cacheline is valid */
+ if (i915_request_signaled(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
- if (unlikely(i915_request_completed(from)))
+ if (unlikely(__i915_request_is_complete(from)))
goto release;
rcu_read_unlock();
@@ -615,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+ dma_fence_put(fence);
+ }
+ drm_printf(m, " }\n");
+
+ if (show_request) {
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ show_request(m, rq, "", 2);
+ }
+
+ mutex_unlock(&tl->mutex);
+ spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+}
+
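A plausible debugfs consumer of the new helper (a sketch; the actual wiring lives elsewhere in the series):

    static int sketch_timelines_show(struct seq_file *m, void *unused)
    {
        struct intel_gt *gt = m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        intel_gt_show_timelines(gt, &p, NULL /* skip per-request dump */);
        return 0;
    }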
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 9882cd911d8e..dcdee692a80e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,6 +31,8 @@
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
+struct drm_printer;
+
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
struct i915_vma *global_hwsp,
@@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt)
return __intel_timeline_create(gt, NULL, 0);
}
-static inline struct intel_timeline *
+struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
- unsigned int offset)
-{
- return __intel_timeline_create(engine->gt,
- engine->status_page.vma,
- offset);
-}
+ unsigned int offset);
static inline struct intel_timeline *
intel_timeline_get(struct intel_timeline *timeline)
@@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_gt_init_timelines(struct intel_gt *gt);
void intel_gt_fini_timelines(struct intel_gt *gt);
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 4474f487f589..e360f50706bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -84,6 +84,8 @@ struct intel_timeline {
struct list_head link;
struct intel_gt *gt;
+ struct list_head engine_link;
+
struct kref kref;
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index adc9a8ea410a..3fdcd5ff71dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -194,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear);
}
@@ -202,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, set);
+ wa_write_clr_set(wal, reg, ~0, set);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, set, set);
+ wa_write_clr_set(wal, reg, set, set);
}
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
- wa_write_masked_or(wal, reg, clr, 0);
+ wa_write_clr_set(wal, reg, clr, 0);
}
+/*
+ * WA operations on "masked registers". A masked register has the upper 16 bits
+ * documented as "masked" in b-spec. Its purpose is to allow writing to just a
+ * portion of the register without a read-modify-write: you simply write the
+ * mask of the bits you are going to modify in the upper 16 bits.
+ *
+ * The wa_masked_* family of functions already does the necessary operations to
+ * calculate the mask based on the parameters passed, so the user only has to
+ * provide the lower 16 bits of that register.
+ */
+
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
@@ -229,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
-#define WA_SET_BIT_MASKED(addr, mask) \
- wa_masked_en(wal, (addr), (mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_masked_dis(wal, (addr), (mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 mask, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+}
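For reference, the _MASKED_* helpers the comment above refers to reduce to placing the modify-mask in the written value's upper half (sketched below; the real macros in i915_reg.h add type and build-time checks):

    #define SKETCH_MASKED_FIELD(mask, value)    (((mask) << 16) | (value))
    #define SKETCH_MASKED_BIT_ENABLE(a)         SKETCH_MASKED_FIELD((a), (a))
    #define SKETCH_MASKED_BIT_DISABLE(a)        SKETCH_MASKED_FIELD((a), 0)

So writing SKETCH_MASKED_BIT_ENABLE(BIT(0)) == 0x00010001 sets bit 0 while leaving bits 1..15 of the register untouched.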
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
@@ -268,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -280,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -293,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
@@ -306,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ wa_masked_en(wal, HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -332,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+ wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -349,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN9_PBE_COMPRESSED_HASH_SELECTION);
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
}
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
- GEN9_ENABLE_GPGPU_PREEMPTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(CACHE_MODE_1,
- GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
+ wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
@@ -396,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/*
* Supporting preemption with fine-granularity requires changes in the
@@ -422,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
if (IS_GEN9_LP(i915))
- WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+ wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
@@ -465,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
@@ -487,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:bxt */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -504,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -518,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -528,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* WaForceContextSaveRestoreNonCoherent:cnl */
- WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+ wa_masked_en(wal, CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaPushConstantDereferenceHoldDisable:cnl */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix:cnl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaDisable3DMidCmdPreemption:cnl */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:cnl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaDisableEarlyEOT:cnl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -580,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ PUSH_CONSTANT_DEREF_DISABLE);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@@ -590,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* (the register is whitelisted in hardware now, so UMDs can opt in
* for coherency if they have a good reason).
*/
- WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
/* WaEnableFloatBlendOptimization:icl */
- wa_write_masked_or(wal,
- GEN10_CACHE_MODE_SS,
- 0, /* write-only, so skip validation */
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+ wa_write_clr_set(wal,
+ GEN10_CACHE_MODE_SS,
+ 0, /* write-only, so skip validation */
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
/* WaDisableGPGPUMidThreadPreemption:icl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/* allow headerless messages for preemptible GPGPU context */
- WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
- GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
- wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
- 0, /* write-only register; skip validation */
- 0xFFFFFFFF);
+ wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
@@ -643,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_14010443199:rkl
* Wa_14010698770:rkl
*/
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* WaDisableGPGPUMidThreadPreemption:gen12 */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
@@ -680,12 +690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
gen12_ctx_workarounds_init(engine, wal);
/* Wa_1409044764 */
- WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+ wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
/* Wa_22010493298 */
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+
+ /*
+ * Wa_16011163337
+ *
+ * Like in tgl_ctx_workarounds_init(), read verification is ignored due
+ * to Wa_1608008084.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -804,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
- wa_masked_en(wal,
- _3D_CHICKEN,
- _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
-
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal,
- GEN6_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
-
- wa_masked_en(wal,
- _3D_CHICKEN3,
- /* WaStripsFansDisableFastClipPerformanceFix:snb */
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
- /*
- * Bspec says:
- * "This bit must be set if 3DSTATE_CLIP clip mode is set
- * to normal and 3DSTATE_SF number of SF output attributes
- * is more than 16."
- */
- _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
}
static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:ivb */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(i915))
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:ivb */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
GEN7_COMMON_SLICE_CHICKEN1,
@@ -866,91 +840,15 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaForceL3Serialization:ivb */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
- /*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- if (0) { /* causes HiZ corruption on ivb:gt1 */
- /* enable HiZ Raw Stall Optimization */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
- }
-
- /* WaDisable4x2SubspanOptimization:ivb */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
}
static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:vlv */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaPsdDispatchEnable:vlv */
- /* WaDisablePSDDualDispatchEnable:vlv */
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_MAX_PS_THREAD_DEP |
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:vlv */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -970,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaVSRefCountFullforceMissDisable:hsw */
wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
-
- wa_masked_dis(wal,
- CACHE_MODE_0_GEN7,
- /* WaDisable_RenderCache_OperationalFlush:hsw */
- RC_OP_FLUSH_ENABLE |
- /* enable HiZ Raw Stall Optimization */
- HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /* WaSampleCChickenBitEnable:hsw */
- wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
}
static void
@@ -1164,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
- wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
@@ -1189,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- wa_write_masked_or(wal,
- GEN11_GACB_PERF_CTRL,
- GEN11_HASH_CTRL_MASK,
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+ wa_write_clr_set(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
@@ -1260,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1408615072:tgl[a0] */
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1358,9 +1236,9 @@ static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
+ DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read, wa->set);
+ cur, cur & wa->read, wa->set & wa->read);
return false;
}
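/*
 * e.g. with wa->read == 0xc and wa->set == 0x4, a readback of cur == 0x8
 * fails since (cur ^ set) & read == 0xc: only the bits covered by the
 * read mask are compared, and only those bits are reported (0x8 vs the
 * expected 0x4).
 */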
@@ -1425,7 +1303,8 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}
-static inline bool is_nonpriv_flags_valid(u32 flags)
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
{
/* Check only valid flag bits are set */
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
@@ -1752,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
-
- /* Wa_1408615072:tgl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
}
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
@@ -1770,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1606700617:tgl,dg1
+ * Wa_22010271021:tgl,rkl,dg1
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
}
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
@@ -1798,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
- /*
- * Wa_1606700617:tgl
- * Wa_22010271021:tgl,rkl
- */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
- }
-
- if (IS_GEN(i915, 12)) {
- /* Wa_1406941453:gen12 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
@@ -1838,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
- wa_write_masked_or(wal,
- GEN8_GARBCNTL,
- GEN11_HASH_CTRL_EXCL_MASK,
- GEN11_HASH_CTRL_EXCL_BIT0);
- wa_write_masked_or(wal,
- GEN11_GLBLINVL,
- GEN11_BANK_HASH_ADDR_EXCL_MASK,
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
@@ -1874,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
/* Wa_1409178092:icl */
- wa_write_masked_or(wal,
- GEN11_SCRATCH2,
- GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
- 0);
+ wa_write_clr_set(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
/* WaEnable32PlaneMode:icl */
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
@@ -1951,11 +1824,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
- wa_write_masked_or(wal,
- GEN8_L3SQCREG1,
- L3_PRIO_CREDITS_MASK,
- L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ wa_write_clr_set(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_write_or(wal,
@@ -1963,12 +1836,112 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
- if (IS_GEN(i915, 7))
+ if (IS_HASWELL(i915)) {
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal,
+ HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ }
+
+ if (IS_VALLEYVIEW(i915)) {
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_IVYBRIDGE(i915)) {
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_GEN(i915, 7)) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ *
+ * BSpec says this must be set, even though the workaround
+ * isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
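+ /*
+ * GEN7_GT_MODE takes masked writes: _MASKED_FIELD() carries
+ * GEN6_WIZ_HASHING_MASK in the high 16 bits so only the WIZ hashing
+ * field is updated, while the final wa_add() argument asks wa_verify()
+ * to expect GEN6_WIZ_HASHING_16x4 on readback.
+ */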
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+ }
+
if (IS_GEN_RANGE(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
@@ -1991,6 +1964,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GFX_MODE,
GFX_TLB_INVALIDATE_EXPLICIT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal,
+ GEN6_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -2067,39 +2073,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
-static struct i915_vma *
-create_scratch(struct i915_address_space *vm, int count)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int size;
- int err;
-
- size = round_up(count * sizeof(u32), PAGE_SIZE);
- obj = i915_gem_object_create_internal(vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0,
- i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
struct mcr_range {
u32 start;
u32 end;
@@ -2202,7 +2175,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
+ vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+ wal->count * sizeof(u32));
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2f830017c51d..4b4f03b70df7 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
GEM_BUG_ON(stalled);
}
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
+ i915_request_mark_eio(rq);
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- mark_eio(rq);
+ i915_request_mark_eio(rq);
__i915_request_submit(rq);
}
INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 1f4020e906a8..db738d400168 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 729c3c7b11e2..439c8984f5fa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -6,6 +6,7 @@
#include <linux/sort.h>
+#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b88aa35ad75b..223ab88f7e57 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b)
static int __live_heartbeat_fast(struct intel_engine_cs *engine)
{
+ const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
struct intel_context *ce;
struct i915_request *rq;
ktime_t t0, t1;
@@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
times[0],
times[ARRAY_SIZE(times) - 1]);
- /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
- if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ /*
+ * Ideally, the upper bound on min work delay would be something like
+ * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+ * are, even with system_wq_highpri, at the mercy of the CPU scheduler
+ * and may be stuck behind some slow work for many milliseconds,
+ * such as our very own display workers.
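+ * With HZ=250, for example, jiffies_to_usecs(6) is 24000us and sets
+ * the error threshold; with HZ=1000 the 20ms floor applies instead.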
+ */
+ if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
- jiffies_to_usecs(6));
+ error_threshold);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8a..c3d965279fc3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,215 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ return *a < *b ? -1 : *a > *b;
+}
+
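+/*
+ * With COUNT == 5 samples, trifilter() drops the two extremes and takes
+ * a weighted average of the middle three: e.g. {9, 1, 5, 7, 100} sorts
+ * to {1, 5, 7, 9, 100} and yields (5 + 2 * 7 + 9) >> 2 == 7, so a
+ * single outlier cannot skew the estimate.
+ */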
+static u64 trifilter(u64 *a)
+{
+ sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+ return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ op;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
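+
+/*
+ * Each emit_*() helper above writes one fixed 4-dword packet, with the
+ * GGTT address split across two dwords (the high dword is 0 here).
+ * __measure_timestamps() below chains seven such packets, which is why
+ * it reserves 28 dwords with intel_ring_begin().
+ */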
+
+static void write_semaphore(u32 *x, u32 value)
+{
+ WRITE_ONCE(*x, value);
+ wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+ u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+ u32 offset = i915_ggtt_offset(engine->status_page.vma);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 28);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* Signal & wait for start */
+ cs = emit_store(cs, offset + 4008, 1);
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+ /* Busy wait */
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+ intel_ring_advance(rq, cs);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ intel_engine_flush_submission(engine);
+
+ /* Wait for the request to start executing, that then waits for us */
+ while (READ_ONCE(sema[2]) == 0)
+ cpu_relax();
+
+ /* Run the request for ~100us, sampling timestamps before/after */
+ preempt_disable();
+ *dt = local_clock();
+ write_semaphore(&sema[2], 0);
+ udelay(100);
+ *dt = local_clock() - *dt;
+ write_semaphore(&sema[2], 1);
+ preempt_enable();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ return -ETIME;
+ }
+ i915_request_put(rq);
+
+ pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+ engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+ *d_ctx = sema[3] - sema[1];
+ *d_ring = sema[4] - sema[0];
+ return 0;
+}
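+
+/*
+ * CPU/GPU handshake above: the GPU sets sema[2] and then spins on it;
+ * the CPU clears sema[2] (releasing the GPU to sample RING_TIMESTAMP
+ * and CTX_TIMESTAMP into sema[0]/sema[1]), busy-waits ~100us under
+ * local_clock(), then sets sema[2] again to trigger the closing samples
+ * in sema[4]/sema[3]. All three deltas thus bracket the same window.
+ */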
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+ u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+ struct intel_context *ce;
+ int i, err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < COUNT; i++) {
+ err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ dt = trifilter(st);
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+ engine->name, dt,
+ intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+ intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+ d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
+ if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+ pr_err("%s Mismatch between ring timestamp and walltime!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ d_ctx *= engine->gt->clock_frequency;
+ if (IS_ICELAKE(engine->i915))
+ d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+ else
+ d_ring *= engine->gt->clock_frequency;
+
+ if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+ pr_err("%s Mismatch between ring and context timestamps!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
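+
+/*
+ * The 3/4 vs 4/3 ratio tests above tolerate roughly 33% disagreement
+ * between clocks: e.g. for a measured dt of 100us, d_ring must convert
+ * to somewhere within ~75us..133us or the calibration is rejected.
+ */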
+
+static int live_engine_timestamps(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+ * the same CS clock.
+ */
+
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ int err;
+
+ st_engine_heartbeat_disable(engine);
+ err = __live_engine_timestamps(engine);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -177,6 +379,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_timestamps),
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 000000000000..264b5ebdb021
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4741 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+ /* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
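+
+/*
+ * Note the clamped wait above: the poll loop may already have consumed
+ * the whole budget, so max(0l, timeout - jiffies) + 1 still grants the
+ * request at least one jiffy to retire after the worker flush.
+ */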
+
+static int live_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_ctx;
+ }
+
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto out_ctx;
+ }
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+ * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendents are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ if (i915_request_wait(rq, 0, HZ / 5) >= 0) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ int err = 0;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
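+
+/*
+ * Each link of the chain polls its own slot (vma + 4 * idx) and, once
+ * released, releases its predecessor by writing to slot idx - 1. Note
+ * that arbitration is enabled around the semaphore poll, so the wait is
+ * the one window in which timeslicing may preempt the request.
+ */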
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
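+
+/*
+ * release_queue() kicks the chain from the CPU side: a kernel request
+ * writes 1 into slot idx - 1 and is then reprioritised, so that with
+ * I915_PRIORITY_BARRIER the scheduler must timeslice it in ahead of the
+ * spinning waiters already queued on the engine.
+ */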
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
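+
+/*
+ * slot[0] acts as a ticket lock: each rewinder polls for slot[0] >= idx,
+ * stamps RING_TIMESTAMP into slot[idx], then bumps the ticket to
+ * idx + 1. live_timeslice_rewind() releases the whole chain by writing
+ * 1 to slot[0] and infers execution order from the recorded timestamps.
+ */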
+
+static int live_timeslice_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+ * dependencies we may end up with replacing the context with itself,
+ * but only a few of those requests, forcing us to rewind the
+ * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
+ err = PTR_ERR(rq[A1]);
+ rq[A1] = NULL;
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+ if (IS_ERR(rq[A2])) {
+ err = PTR_ERR(rq[A2]);
+ rq[A2] = NULL;
+ goto err;
+ }
+
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+ intel_context_put(ce);
+ if (IS_ERR(rq[B1])) {
+ err = PTR_ERR(rq[B1]);
+ rq[B1] = NULL;
+ goto err;
+ }
+
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+ while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ st_engine_heartbeat_enable(engine);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that even if ELSP[0] and ELSP[1] are filled with
+ * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching priority as semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_REQUEST_NOPREEMPT.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses an MI_STORE_DWORD_IMM to update the semaphore value,
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = igt_request_alloc(ctx_lo, engine);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
+static int live_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
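+ /*
+ * Check that a high priority spinner preempts an in-flight low
+ * priority spinner on each engine.
+ */
+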
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
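+ /*
+ * Check that preemption can be triggered after submission: a request
+ * queued behind an executing spinner is promoted via engine->schedule()
+ * and must then preempt that spinner.
+ */
+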
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ engine->schedule(rq, &attr);
+
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+}
+
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+ c->ctx = kernel_context(gt->i915);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, gt))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be under observation and so must not be interrupted.
+ */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[1]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
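+ /* Reuse context 'a' so that cancelling rq[2] also cancels rq[0] */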
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[2]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
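+/*
+ * Selftest fault injection: mark subsequent engine resets as failing, so
+ * that recovery must fall back to the heartbeat forcing a full reset.
+ */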
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq;
+ int err;
+
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+
+ err = intel_engine_pulse(engine);
+ if (err)
+ goto out;
+
+ force_reset_timeout(engine);
+
+ /* force preempt reset [failure] */
+ while (!engine->execlists.pending[0])
+ intel_engine_flush_submission(engine);
+ del_timer_sync(&engine->execlists.preempt);
+ intel_engine_flush_submission(engine);
+
+ cancel_reset_timeout(engine);
+
+ /* after failure, require heartbeats to reset device */
+ intel_engine_set_heartbeat(engine, 1);
+ err = wait_for_reset(engine, rq, HZ / 2);
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_fail(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+ };
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
+
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ st_engine_heartbeat_disable(engine);
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ st_engine_heartbeat_enable(engine);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
+
+ if (preempt_client_init(gt, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
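+ /* Estimate the maximum number of requests that fit into the ring */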
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = igt_request_alloc(hi.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
+
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
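+ /*
+ * Each member of the gang spins on a semaphore at the start of its
+ * own batch; once released, it writes zero into the previous (lower
+ * priority) batch, unwinding the gang from highest priority to lowest.
+ */
+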
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = i915_vma_get(vma);
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
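+ /*
+ * Chain the gang through rq->mock.link. For the first request,
+ * *prev is NULL, so the link encodes the list terminator and
+ * list_next_entry() will yield NULL to stop the unwind loop.
+ */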
+ rq->mock.link.next = &(*prev)->mock.link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring until we cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submited\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we roll back large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch, which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is simply to push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * Release the last spinner: it has the highest priority and should
+ * execute first. When that spinner completes, it will terminate the
+ * next lowest spinner, and so on, until there are no more spinners
+ * and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n = list_next_entry(rq, mock.link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
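+ /*
+ * Bump the global semaphore from a maximum priority request, forcing
+ * preemption of the GPR clients in the middle of their batches.
+ */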
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> one jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+ return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct intel_gt *gt;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = igt_request_alloc(ctx, smoke->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ if (err)
+ return err;
+
+ count++;
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
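+ /* One submission thread per engine, cycling the priority per request */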
+ for_each_engine(engine, smoke->gt, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ count = 0;
+ for_each_engine(engine, smoke->gt, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->gt, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .gt = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 256,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ struct igt_live_test t;
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
+ i915_gem_object_unpin_map(smoke.batch);
+
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+ err = -EIO;
+ goto err_batch;
+ }
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_free:
+ kfree(smoke.contexts);
+
+ return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16] = {};
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
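+ /*
+ * Measure the request latency of nops submitted through nctx virtual
+ * engines, either interleaving the contexts per request or chaining
+ * all of a context's requests together (CHAIN).
+ */
+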
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+ for (n = 0; n < nctx; n++) {
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ }
+ return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ return err;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(gt, siblings, nsibling,
+ n, 0);
+ if (err)
+ return err;
+ }
+
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
+ if (nsibling < 2)
+ continue;
+
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
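+ /*
+ * Sample GPR[n], written as n by the previous request (potentially
+ * on a different sibling), into scratch[n], then seed GPR[n + 1]
+ * with n + 1 for the next request to sample.
+ */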
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
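+ /* If the GPRs survived migration across siblings, scratch[n] == n */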
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * registers are preserved.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ /* As we use CS_GPR we cannot run before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+ struct intel_engine_cs *master;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ unsigned long n;
+ int err;
+
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it has been requested that the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed,
+ * and hence may no longer exist by the time we check. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, gt, id) {
+ struct i915_sw_fence fence = {};
+ struct intel_context *ce;
+
+ if (master->class == class)
+ continue;
+
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
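+ /*
+ * With BOND_SCHEDULE, the onstack fence holds the master in the
+ * queued (not-yet-ready) state until all bonded children have been
+ * constructed, exercising the unsignaled submit-fence phase.
+ */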
+
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
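+ /* Release the master: drop the submit fence and end the spinner */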
+ onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
+
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1], 0,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(gt,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_disable(siblings[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
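+ /* Process the submission by hand, so the spinner occupies ELSP[0] */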
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_enable(siblings[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
+ SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
+ SUBTEST(live_busywait_preempt),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
+ SUBTEST(live_preempt_gang),
+ SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
+ SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
+ SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
+ };
+
+ if (!HAS_EXECLISTS(i915))
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b51..5d911f724ebe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
enum intel_engine_id id;
int err = 0;
- if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ if (!gt->clock_frequency) { /* unknown */
pr_info("CS_TIMESTAMP frequency unknown\n");
return 0;
}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
measure_clocks(engine, &cycles, &dt);
- time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
- expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+ time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+ expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
engine->name, cycles, time, dt, expected,
- RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+ engine->gt->clock_frequency / 1000);
if (9 * time < 8 * dt || 8 * time > 9 * dt) {
pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2..463bb6a700c8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
}
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -539,6 +540,149 @@ static int igt_reset_nop_engine(void *arg)
return 0;
}
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
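+ /* Fault injection: make every subsequent engine reset attempt time out */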
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can recover from engine-reset failures */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ st_engine_heartbeat_disable(engine);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err == 0) /* timeouts only generated on gen8+ */
+ goto skip;
+
+ count = 0;
+ do {
+ struct i915_request *last = NULL;
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < count % 15; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ if (last)
+ i915_request_put(last);
+
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
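+ /* Alternate genuine resets with injected reset timeouts */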
+ if (count & 1) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ GEM_TRACE_DUMP();
+ i915_request_put(last);
+ break;
+ }
+ } else {
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err != -ETIMEDOUT) {
+ pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+ engine->name, err);
+ i915_request_put(last);
+ break;
+ }
+ }
+
+ err = 0;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ / 2) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EIO;
+ }
+ i915_request_put(last);
+ }
+ count++;
+ } while (err == 0 && time_before(jiffies, end_time));
+out:
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -560,6 +704,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long count;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,6 +722,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ count = 0;
do {
if (active) {
struct i915_request *rq;
@@ -608,7 +754,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -625,9 +772,13 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = -EINVAL;
break;
}
+
+ count++;
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
+ pr_info("%s: Completed %lu %s resets\n",
+ engine->name, count, active ? "active" : "idle");
if (err)
break;
@@ -1478,7 +1629,8 @@ static int igt_reset_queue(void *arg)
prev = rq;
count++;
} while (time_before(jiffies, end_time));
- pr_info("%s: Completed %d resets\n", engine->name, count);
+ pr_info("%s: Completed %d queued resets\n",
+ engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
@@ -1575,13 +1727,21 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable(t);
+ if (t->func)
+ tasklet_disable(t);
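+ /* The softirq phase runs its critical section with bh already disabled */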
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
- tasklet_enable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1847,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_fail_engine),
SUBTEST(igt_reset_engines),
SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e0..920979a89413 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,22 +1,22 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
@@ -27,29 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
+ return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -70,6 +48,9 @@ static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
@@ -89,4623 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
} while (1);
}
-static int wait_for_reset(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
-{
- timeout += jiffies;
-
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_completed(rq))
- break;
-
- if (READ_ONCE(rq->fence.error))
- break;
- } while (time_before(jiffies, timeout));
-
- flush_scheduled_work();
-
- if (rq->fence.error != -EIO) {
- pr_err("%s: hanging request %llx:%lld not reset\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -EINVAL;
- }
-
- /* Give the request a jiffie to complete after flushing the worker */
- if (i915_request_wait(rq, 0,
- max(0l, (long)(timeout - jiffies)) + 1) < 0) {
- pr_err("%s: hanging request %llx:%lld did not complete\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_ctx;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
- GEM_TRACE("spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_ctx;
- }
-
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto out_ctx;
- }
-
-out_ctx:
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_restore(struct intel_gt *gt, int prio)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = -ENOMEM;
-
- /*
- * Check that we can correctly context switch between 2 instances
- * on the same engine from the same parent context.
- */
-
- if (igt_spinner_init(&spin, gt))
- return err;
-
- err = 0;
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq[2];
- struct igt_live_test t;
- int n;
-
- if (prio && !intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- /*
- * Setup the pair of contexts such that if we
- * lite-restore using the RING_TAIL from ce[1] it
- * will execute garbage from ce[0]->ring.
- */
- memset(tmp->ring->vaddr,
- POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
- tmp->ring->vma->size);
-
- ce[n] = tmp;
- }
- GEM_BUG_ON(!ce[1]->ring->size);
- intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
-
- rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto err_ce;
- }
-
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
-
- if (!igt_wait_for_spinner(&spin, rq[0])) {
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- rq[1] = i915_request_create(ce[1]);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- if (!prio) {
- /*
- * Ensure we do the switch to ce[1] on completion.
- *
- * rq[0] is already submitted, so this should reduce
- * to a no-op (a wait on a request on the same engine
- * uses the submit fence, not the completion fence),
- * but it will install a dependency on rq[1] for rq[0]
- * that will prevent the pair being reordered by
- * timeslicing.
- */
- i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- }
-
- i915_request_get(rq[1]);
- i915_request_add(rq[1]);
- GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
- i915_request_put(rq[0]);
-
- if (prio) {
- struct i915_sched_attr attr = {
- .priority = prio,
- };
-
- /* Alternatively preempt the spinner with ce[1] */
- engine->schedule(rq[1], &attr);
- }
-
- /* And switch back to ce[0] for good measure */
- rq[0] = i915_request_create(ce[0]);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- i915_request_put(rq[1]);
- goto err_ce;
- }
-
- i915_request_await_dma_fence(rq[0], &rq[1]->fence);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_switch(void *arg)
-{
- return live_unlite_restore(arg, 0);
-}
-
-static int live_unlite_preempt(void *arg)
-{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
-}
-
-static int live_unlite_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Setup a preemption event that will cause almost the entire ring
- * to be unwound, potentially fooling our intel_ring_direction()
- * into emitting a forward lite-restore instead of the rollback.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- /* Create max prio spinner, followed by N low prio nops */
- rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
- /* Fill the ring, until we will cause a wrap */
- n = 0;
- while (intel_ring_direction(ce[0]->ring,
- rq->wa_tail,
- ce[0]->ring->tail) <= 0) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
- rq->tail,
- ce[0]->ring->tail) <= 0);
- i915_request_put(rq);
-
- /* Create a second ring to preempt the first ring after rq[0] */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
- pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_pin_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * We have to be careful not to trust intel_ring too much, for example
- * ring->head is updated upon retire which is out of sync with pinning
- * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
- * or else we risk writing an older, stale value.
- *
- * To simulate this, let's apply a bit of deliberate sabotage.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- struct intel_ring *ring;
- struct igt_live_test t;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- err = intel_context_pin(ce);
- if (err) {
- intel_context_put(ce);
- break;
- }
-
- /* Keep the context awake while we play games */
- err = i915_active_acquire(&ce->active);
- if (err) {
- intel_context_unpin(ce);
- intel_context_put(ce);
- break;
- }
- ring = ce->ring;
-
- /* Poison the ring, and offset the next request from HEAD */
- memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
- ring->emit = ring->size / 2;
- ring->tail = ring->emit;
- GEM_BUG_ON(ring->head);
-
- intel_context_unpin(ce);
-
- /* Submit a simple nop request */
- GEM_BUG_ON(intel_context_is_pinned(ce));
- rq = intel_context_create_request(ce);
- i915_active_release(&ce->active); /* e.g. async retire */
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
- GEM_BUG_ON(!rq->head);
- i915_request_add(rq);
-
- /* Expect not to hang! */
- if (igt_live_test_end(&t)) {
- err = -EIO;
- break;
- }
- }
-
- return err;
-}
-
-static int live_hold_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendants are not executed while the capture is in progress.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out;
- }
-
- /* We have our request executing, now remove it and reset */
-
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- i915_request_get(rq);
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- i915_request_put(rq);
- err = -EIO;
- goto out;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
- i915_request_put(rq);
-
-out:
- st_engine_heartbeat_enable(engine);
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static const char *error_repr(int err)
-{
- return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
- static const struct error_phase {
- enum { GOOD = 0, BAD = -EIO } error[2];
- } phases[] = {
- { { BAD, GOOD } },
- { { BAD, BAD } },
- { { BAD, GOOD } },
- { { GOOD, GOOD } }, /* sentinel */
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
- * of invalid commands in user batches that will cause a GPU hang.
- * This is a faster mechanism than using hangcheck/heartbeats, but
- * only detects problems the HW knows about -- it will not warn when
- * we kill the HW!
- *
- * To verify our detection and reset, we throw some invalid commands
- * at the HW and wait for the interrupt.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for_each_engine(engine, gt, id) {
- const struct error_phase *p;
- int err = 0;
-
- st_engine_heartbeat_disable(engine);
-
- for (p = phases; p->error[0] != GOOD; p++) {
- struct i915_request *client[ARRAY_SIZE(phases->error)];
- u32 *cs;
- int i;
-
- memset(client, 0, sizeof(*client));
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (rq->engine->emit_init_breadcrumb) {
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out;
- }
-
- if (p->error[i]) {
- *cs++ = 0xdeadbeef;
- *cs++ = 0xdeadbeef;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- client[i] = i915_request_get(rq);
- i915_request_add(rq);
- }
-
- err = wait_for_submit(engine, client[0], HZ / 2);
- if (err) {
- pr_err("%s: first request did not start within time!\n",
- engine->name);
- err = -ETIME;
- goto out;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (i915_request_wait(client[i], 0, HZ / 5) < 0)
- pr_debug("%s: %s request incomplete!\n",
- engine->name,
- error_repr(p->error[i]));
-
- if (!i915_request_started(client[i])) {
- pr_err("%s: %s request not started!\n",
- engine->name,
- error_repr(p->error[i]));
- err = -ETIME;
- goto out;
- }
-
- /* Kick the tasklet to process the error */
- intel_engine_flush_submission(engine);
- if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request (%s) with wrong error code: %d\n",
- engine->name,
- error_repr(p->error[i]),
- i915_request_completed(client[i]) ? "completed" : "running",
- client[i]->fence.error);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- for (i = 0; i < ARRAY_SIZE(client); i++)
- if (client[i])
- i915_request_put(client[i]);
- if (err) {
- pr_err("%s: failed at phase[%zd] { %d, %d }\n",
- engine->name, p - phases,
- p->error[0], p->error[1]);
- break;
- }
- }
-
- st_engine_heartbeat_enable(engine);
- if (err) {
- intel_gt_set_wedged(gt);
- return err;
- }
- }
-
- return 0;
-}
-
-static int
-emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 10);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma) + 4 * idx;
- *cs++ = 0;
-
- if (idx > 0) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static struct i915_request *
-semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
-{
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- goto out_ce;
-
- err = 0;
- if (rq->engine->emit_init_breadcrumb)
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err == 0)
- err = emit_semaphore_chain(rq, vma, idx);
- if (err == 0)
- i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_ce:
- intel_context_put(ce);
- return rq;
-}
-
-static int
-release_queue(struct intel_engine_cs *engine,
- struct i915_vma *vma,
- int idx, int prio)
-{
- struct i915_sched_attr attr = {
- .priority = prio,
- };
- struct i915_request *rq;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- local_bh_disable();
- engine->schedule(rq, &attr);
- local_bh_enable(); /* kick tasklet */
-
- i915_request_put(rq);
-
- return 0;
-}
-
-static int
-slice_semaphore_queue(struct intel_engine_cs *outer,
- struct i915_vma *vma,
- int count)
-{
- struct intel_engine_cs *engine;
- struct i915_request *head;
- enum intel_engine_id id;
- int err, i, n = 0;
-
- head = semaphore_queue(outer, vma, n++);
- if (IS_ERR(head))
- return PTR_ERR(head);
-
- for_each_engine(engine, outer->gt, id) {
- for (i = 0; i < count; i++) {
- struct i915_request *rq;
-
- rq = semaphore_queue(engine, vma, n++);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_put(rq);
- }
- }
-
- err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
- if (err)
- goto out;
-
- if (i915_request_wait(head, 0,
- 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
- pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
- count, n);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(outer->gt);
- err = -EIO;
- }
-
-out:
- i915_request_put(head);
- return err;
-}
-
-static int live_timeslice_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * If a request takes too long, we would like to give other users
- * a fair go on the GPU. In particular, users may create batches
- * that wait upon external input, where that input may even be
- * supplied by another GPU job. To avoid blocking forever, we
- * need to preempt the current task and replace it with another
- * ready task.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
-
- memset(vaddr, 0, PAGE_SIZE);
-
- st_engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, 5);
- st_engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
- struct i915_request *wait,
- void *slot, int idx)
-{
- const u32 offset =
- i915_ggtt_offset(ce->engine->status_page.vma) +
- offset_in_page(slot);
- struct i915_request *rq;
- u32 *cs;
- int err;
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- return rq;
-
- if (wait) {
- err = i915_request_await_dma_fence(rq, &wait->fence);
- if (err)
- goto err;
- }
-
- cs = intel_ring_begin(rq, 14);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = idx;
- *cs++ = offset;
- *cs++ = 0;
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = offset + idx * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = idx + 1;
-
- intel_ring_advance(rq, cs);
-
- rq->sched.attr.priority = I915_PRIORITY_MASK;
- err = 0;
-err:
- i915_request_get(rq);
- i915_request_add(rq);
- if (err) {
- i915_request_put(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * The usual presumption on timeslice expiration is that we replace
- * the active context with another. However, given a chain of
- * dependencies we may end up with replacing the context with itself,
- * but only a few of those requests, forcing us to rewind the
- * RING_TAIL of the original request.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- for_each_engine(engine, gt, id) {
- enum { A1, A2, B1 };
- enum { X = 1, Z, Y };
- struct i915_request *rq[3] = {};
- struct intel_context *ce;
- unsigned long timeslice;
- int i, err = 0;
- u32 *slot;
-
- if (!intel_engine_has_timeslices(engine))
- continue;
-
- /*
- * A:rq1 -- semaphore wait, timestamp X
- * A:rq2 -- write timestamp Y
- *
- * B:rq1 [await A:rq1] -- write timestamp Z
- *
- * Force timeslice, release semaphore.
- *
- * Expect execution/evaluation order XZY
- */
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- slot = memset32(engine->status_page.addr + 1000, 0, 4);
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[A1] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[A1])) {
- intel_context_put(ce);
- goto err;
- }
-
- rq[A2] = create_rewinder(ce, NULL, slot, Y);
- intel_context_put(ce);
- if (IS_ERR(rq[A2]))
- goto err;
-
- err = wait_for_submit(engine, rq[A2], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit first context\n",
- engine->name);
- goto err;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
- intel_context_put(ce);
- if (IS_ERR(rq[2]))
- goto err;
-
- err = wait_for_submit(engine, rq[B1], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit second context\n",
- engine->name);
- goto err;
- }
-
- /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
- if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
- }
- /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
- GEM_BUG_ON(i915_request_is_active(rq[A2]));
-
- /* Release the hounds! */
- slot[0] = 1;
- wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
-
- for (i = 1; i <= 3; i++) {
- unsigned long timeout = jiffies + HZ / 2;
-
- while (!READ_ONCE(slot[i]) &&
- time_before(jiffies, timeout))
- ;
-
- if (!time_before(jiffies, timeout)) {
- pr_err("%s: rq[%d] timed out\n",
- engine->name, i - 1);
- err = -ETIME;
- goto err;
- }
-
- pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
- }
-
- /* XZY: XZ < XY */
- if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
- pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
- engine->name,
- slot[Z] - slot[X],
- slot[Y] - slot[X]);
- err = -EINVAL;
- }
-
-err:
- memset32(&slot[0], -1, 4);
- wmb();
-
- engine->props.timeslice_duration_ms = timeslice;
- st_engine_heartbeat_enable(engine);
- for (i = 0; i < 3; i++)
- i915_request_put(rq[i]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return rq;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- return rq;
-}
-
-static long slice_timeout(struct intel_engine_cs *engine)
-{
- long timeout;
-
- /* Enough time for a timeslice to kick in, and kick out */
- timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
-
- /* Enough time for the nop request to complete */
- timeout += HZ / 5;
-
- return timeout + 1;
-}
-
-static int live_timeslice_queue(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * Make sure that even if ELSP[0] and ELSP[1] are filled with
- * timeslicing between them disabled, we *do* enable timeslicing
- * if the queue demands it. (Normally, we do not submit if
- * ELSP[1] is already occupied, so must rely on timeslicing to
- * eject ELSP[0] in favour of the queue.)
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct i915_request *rq, *nop;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
- memset(vaddr, 0, PAGE_SIZE);
-
- /* ELSP[0]: semaphore wait */
- rq = semaphore_queue(engine, vma, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_heartbeat;
- }
- engine->schedule(rq, &attr);
- err = wait_for_submit(engine, rq, HZ / 2);
- if (err) {
- pr_err("%s: Timed out trying to submit semaphores\n",
- engine->name);
- goto err_rq;
- }
-
- /* ELSP[1]: nop request */
- nop = nop_request(engine);
- if (IS_ERR(nop)) {
- err = PTR_ERR(nop);
- goto err_rq;
- }
- err = wait_for_submit(engine, nop, HZ / 2);
- i915_request_put(nop);
- if (err) {
- pr_err("%s: Timed out trying to submit nop\n",
- engine->name);
- goto err_rq;
- }
-
- GEM_BUG_ON(i915_request_completed(rq));
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Queue: semaphore signal, matching priority as semaphore */
- err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err)
- goto err_rq;
-
- /* Wait until we ack the release_queue and start timeslicing */
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
- } while (READ_ONCE(engine->execlists.pending[0]));
-
- /* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to timeslice into queue\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EIO;
- }
-err_rq:
- i915_request_put(rq);
-err_heartbeat:
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int live_timeslice_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * We should not timeslice into a request that is marked with
- * I915_REQUEST_NOPREEMPT.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- unsigned long timeslice;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- /* Create an unpreemptible spinner */
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
- i915_request_put(rq);
-
- /* Followed by a maximum priority barrier (heartbeat) */
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out_spin;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_spin;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- /*
- * Wait until the barrier is in ELSP, and we know timeslicing
- * will have been activated.
- */
- if (wait_for_submit(engine, rq, HZ / 2)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- /*
- * Since the ELSP[0] request is unpreemptible, it should not
- * allow the maximum priority barrier through. Wait long
- * enough to see if it is timesliced in by mistake.
- */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
- pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
- engine->name);
- err = -EINVAL;
- }
- i915_request_put(rq);
-
-out_spin:
- igt_spinner_end(&spin);
-out_heartbeat:
- xchg(&engine->props.timeslice_duration_ms, timeslice);
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- break;
- }
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_busywait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- enum intel_engine_id id;
- int err = -ENOMEM;
- u32 *map;
-
- /*
- * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
- * preempt the busywaits used to synchronise between rings.
- */
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ctx_lo;
- }
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_map;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_vma;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *lo, *hi;
- struct igt_live_test t;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_vma;
- }
-
-		/*
-		 * We create two requests. The low priority request busywaits
-		 * on a semaphore (inside the ringbuffer, where it should be
-		 * preemptible) and the high priority request uses an
-		 * MI_STORE_DWORD_IMM to update the semaphore value, allowing
-		 * the first request to complete. If preemption fails, we
-		 * hang instead.
-		 */
-
- lo = igt_request_alloc(ctx_lo, engine);
- if (IS_ERR(lo)) {
- err = PTR_ERR(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(lo, 8);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 1;
-
- /* XXX Do we need a flush + invalidate here? */
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
-
- intel_ring_advance(lo, cs);
-
- i915_request_get(lo);
- i915_request_add(lo);
-
- if (wait_for(READ_ONCE(*map), 10)) {
- i915_request_put(lo);
- err = -ETIMEDOUT;
- goto err_vma;
- }
-
- /* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, 0, 1) != -ETIME) {
- i915_request_put(lo);
-			pr_err("%s: Busywaiting request did not busywait!\n",
- engine->name);
- err = -EIO;
- goto err_vma;
- }
-
- hi = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(hi)) {
- err = PTR_ERR(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(hi, 4);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 0;
-
- intel_ring_advance(hi, cs);
- i915_request_add(hi);
-
- if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to preempt semaphore busywait!\n",
- engine->name);
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_request_put(lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_vma;
- }
- GEM_BUG_ON(READ_ONCE(*map));
- i915_request_put(lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_vma;
- }
- }
-
- err = 0;
-err_vma:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
- return err;
-}
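-
-/*
- * CPU-side sketch of the MI_SEMAPHORE_WAIT emitted above: in POLL mode with
- * SAD_EQ_SDD, the CS keeps polling the dword at the given GGTT address
- * until it equals the inline data (0 here), i.e. roughly:
- */
-static void semaphore_busywait(const u32 *semaphore)
-{
-	while (READ_ONCE(*semaphore) != 0)
-		cpu_relax(); /* released by the high priority STORE_DWORD */
-}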
-
-static struct i915_request *
-spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arb)
-{
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = igt_spinner_create_request(spin, ce, arb);
- intel_context_put(ce);
- return rq;
-}
-
-static int live_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
- pr_err("Logical preemption supported, but not exposed\n");
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
-static int live_late_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {};
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
-
- /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- pr_err("First context failed to start\n");
- goto err_wedged;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("Second context overtook first?\n");
- goto err_wedged;
- }
-
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- engine->schedule(rq, &attr);
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("High priority context failed to preempt the low priority context\n");
- GEM_TRACE_DUMP();
- goto err_wedged;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
-}
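-
-/*
- * The late preemption trigger above, reduced to a pattern: submit at the
- * default priority, then raise the priority through the engine's schedule()
- * hook once the request is already queued behind the spinner. A minimal
- * sketch, assuming an execlists backend that provides the hook:
- */
-static void bump_to_max_priority(struct i915_request *rq)
-{
-	struct i915_sched_attr attr = {
-		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
-	};
-
-	if (rq->engine->schedule)
-		rq->engine->schedule(rq, &attr);
-}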
-
-struct preempt_client {
- struct igt_spinner spin;
- struct i915_gem_context *ctx;
-};
-
-static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
-{
- c->ctx = kernel_context(gt->i915);
- if (!c->ctx)
- return -ENOMEM;
-
- if (igt_spinner_init(&c->spin, gt))
- goto err_ctx;
-
- return 0;
-
-err_ctx:
- kernel_context_close(c->ctx);
- return -ENOMEM;
-}
-
-static void preempt_client_fini(struct preempt_client *c)
-{
- igt_spinner_fini(&c->spin);
- kernel_context_close(c->ctx);
-}
-
-static int live_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that we can disable preemption for an individual request
-	 * that may be under observation and so must not be interrupted.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- goto err_client_b;
- }
-
- /* Low priority client, but unpreemptable! */
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- goto err_wedged;
- }
-
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- goto err_client_b;
- }
-
- i915_request_add(rq_b);
-
- /* B is much more important than A! (But A is unpreemptable.) */
- GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
-
- /* Wait long enough for preemption and timeslicing */
- if (igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client started too early!\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&b.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d; should have been suppressed!\n",
- engine->execlists.preempt_hang.count);
- err = -EINVAL;
- goto err_wedged;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-struct live_preempt_cancel {
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
-};
-
-static int __cancel_active0(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP0 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
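-
-/*
- * All of the __cancel_*() cases share one two-step recipe, sketched here
- * (an illustrative helper, not a driver API): ban the context so it cannot
- * be resubmitted, then send a heartbeat pulse whose barrier priority forces
- * the preemption event that removes the context from the GPU.
- */
-static int cancel_inflight(struct intel_context *ce,
-			   struct intel_engine_cs *engine)
-{
-	intel_context_set_banned(ce);
-	return intel_engine_pulse(engine);
-}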
-
-static int __cancel_active1(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[2] = {};
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP1 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* no preemption */
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = spinner_create_request(&arg->b.spin,
- arg->b.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[1]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- igt_spinner_end(&arg->a.spin);
- err = wait_for_reset(arg->engine, rq[1], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != 0) {
- pr_err("Normal inflight0 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != -EIO) {
- pr_err("Cancelled inflight1 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_queued(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[3] = {};
- struct igt_live_test t;
- int err;
-
- /* Full ELSP and one in the wings */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- rq[2] = spinner_create_request(&arg->b.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[2])) {
- err = PTR_ERR(rq[2]);
- goto out;
- }
-
- i915_request_get(rq[2]);
- err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
- i915_request_add(rq[2]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[2]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq[2], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != 0) {
- pr_err("Normal inflight1 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[2]->fence.error != -EIO) {
- pr_err("Cancelled queued request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[2]);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_hostile(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- int err;
-
- /* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!intel_has_reset_engine(arg->engine->gt))
- return 0;
-
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine); /* force reset */
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_flush_test(arg->engine->i915))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_cancel(void *arg)
-{
- struct intel_gt *gt = arg;
- struct live_preempt_cancel data;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * To cancel an inflight context, we need to first remove it from the
- * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &data.a))
- return -ENOMEM;
- if (preempt_client_init(gt, &data.b))
- goto err_client_a;
-
- for_each_engine(data.engine, gt, id) {
- if (!intel_engine_has_preemption(data.engine))
- continue;
-
- err = __cancel_active0(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_active1(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_queued(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_hostile(&data);
- if (err)
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&data.b);
-err_client_a:
- preempt_client_fini(&data.a);
- return err;
-
-err_wedged:
- GEM_TRACE_DUMP();
- igt_spinner_end(&data.b.spin);
- igt_spinner_end(&data.a.spin);
- intel_gt_set_wedged(gt);
- goto err_client_b;
-}
-
-static int live_suppress_self_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that if a preemption request does not cause a change in
- * the current execution order, the preempt-to-idle injection is
- * skipped and that we do not accidentally apply it after the CS
- * completion event.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0; /* presume black box */
-
- if (intel_vgpu_active(gt->i915))
- return 0; /* GVT forces single port & request submission */
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- st_engine_heartbeat_disable(engine);
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- /* Keep postponing the timer to avoid premature slicing */
- mod_timer(&engine->execlists.timer, jiffies + HZ);
- for (depth = 0; depth < 8; depth++) {
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
- i915_request_add(rq_b);
-
- GEM_BUG_ON(i915_request_completed(rq_a));
- engine->schedule(rq_a, &attr);
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- swap(a, b);
- rq_a = rq_b;
- }
- igt_spinner_end(&a.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- goto err_client_b;
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-static int live_chain_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client hi, lo;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Build a chain AB...BA between two contexts (A, B) and request
- * preemption of the last request. It should then complete before
- * the previously submitted spinner in B.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &hi))
- return -ENOMEM;
-
- if (preempt_client_init(gt, &lo))
- goto err_client_hi;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct igt_live_test t;
- struct i915_request *rq;
- int ring_size, count, i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- ring_size = rq->wa_tail - rq->head;
- if (ring_size < 0)
- ring_size += rq->ring->size;
- ring_size = rq->ring->size / ring_size;
- pr_debug("%s(%s): Using maximum of %d requests\n",
- __func__, engine->name, ring_size);
-
- igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- pr_err("Timed out waiting to flush %s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_wedged;
- }
-
- for_each_prime_number_from(count, 1, ring_size) {
- rq = spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&hi.spin, rq))
- goto err_wedged;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
-
- for (i = 0; i < count; i++) {
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- }
-
- rq = igt_request_alloc(hi.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
- engine->schedule(rq, &attr);
-
- igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to preempt over chain of %d\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- igt_spinner_end(&lo.spin);
- i915_request_put(rq);
-
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to flush low priority chain of %d requests\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
- }
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_wedged;
- }
- }
-
- err = 0;
-err_client_lo:
- preempt_client_fini(&lo);
-err_client_hi:
- preempt_client_fini(&hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&hi.spin);
- igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_lo;
-}
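-
-/*
- * The ring_size estimate used in live_chain_preempt(), spelled out: one
- * spinner request occupies wa_tail - head bytes of the ring (modulo a
- * wrap), so the ring holds roughly size / request_size such requests.
- * A sketch of the same arithmetic:
- */
-static int max_requests_in_ring(const struct i915_request *rq)
-{
-	int request_sz = rq->wa_tail - rq->head;
-
-	if (request_sz < 0) /* the request wrapped around the ring */
-		request_sz += rq->ring->size;
-
-	return rq->ring->size / request_sz;
-}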
-
-static int create_gang(struct intel_engine_cs *engine,
- struct i915_request **prev)
-{
- struct drm_i915_gem_object *obj;
- struct intel_context *ce;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cs;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ce;
- }
-
- vma = i915_vma_instance(obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err_obj;
-	}
-
- /* Semaphore target: spin until zero */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
-
- if (*prev) {
- u64 offset = (*prev)->batch->node.start;
-
- /* Terminate the spinner in the next lower priority batch. */
- *cs++ = MI_STORE_DWORD_IMM_GEN4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = 0;
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_obj;
-	}
-
- rq->batch = i915_vma_get(vma);
- i915_request_get(rq);
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- i915_request_add(rq);
- if (err)
- goto err_rq;
-
- i915_gem_object_put(obj);
- intel_context_put(ce);
-
- rq->mock.link.next = &(*prev)->mock.link;
- *prev = rq;
- return 0;
-
-err_rq:
- i915_vma_put(rq->batch);
- i915_request_put(rq);
-err_obj:
- i915_gem_object_put(obj);
-err_ce:
- intel_context_put(ce);
- return err;
-}
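-
-/*
- * Shape of the gang built by create_gang(): each batch spins on the first
- * dword of its own vma, and each newer (higher priority) batch releases its
- * predecessor by writing 0 there:
- *
- *	batch[n]: while (batch[n][0]) poll; batch[n-1][0] = 0; BB_END
- *
- * so releasing the newest spinner from the CPU unwinds the whole chain,
- * highest to lowest priority. A sketch of that release:
- */
-static void release_gang(u32 *newest_batch)
-{
-	WRITE_ONCE(*newest_batch, 0); /* topples the whole priority chain */
-}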
-
-static int __live_preempt_ring(struct intel_engine_cs *engine,
- struct igt_spinner *spin,
- int queue_sz, int ring_sz)
-{
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int err = 0;
- int n;
-
- if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
- return -EIO;
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- tmp->ring = __intel_context_ring_size(ring_sz);
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(spin, rq)) {
- intel_gt_set_wedged(engine->gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
-	/* Fill the ring until we cause a wrap */
- n = 0;
- while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, queue_sz, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- i915_request_put(rq);
-
- /* Create a second request to preempt the first ring */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
-		pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
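-
-/*
- * The fill loop above as a single predicate (a sketch): nop requests are
- * added until more than queue_sz bytes of the ring sit between the
- * spinner's wa_tail and the current tail, so servicing the preemption has
- * to unwind and later rewind that entire span of the poisoned ring.
- */
-static bool ring_queue_filled(const struct intel_context *ce,
-			      const struct i915_request *spinner,
-			      int queue_sz)
-{
-	return ce->ring->tail - spinner->wa_tail > queue_sz;
-}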
-
-static int live_preempt_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Check that we rollback large chunks of a ring in order to do a
- * preemption event. Similar to live_unlite_ring, but looking at
- * ring size rather than the impact of intel_ring_direction().
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n <= 3; n++) {
- err = __live_preempt_ring(engine, &spin,
- n * SZ_4K / 4, SZ_4K);
- if (err)
- break;
- }
-
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_preempt_gang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * Build as long a chain of preempters as we can, with each
- * request higher priority than the last. Once we are ready, we release
-	 * the last batch which then percolates down the chain, each releasing
- * the next oldest in turn. The intent is to simply push as hard as we
- * can with the number of preemptions, trying to exceed narrow HW
- * limits. At a minimum, we insist that we can sort all the user
- * high priority levels into execution order.
- */
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq = NULL;
- struct igt_live_test t;
- IGT_TIMEOUT(end_time);
- int prio = 0;
- int err = 0;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
- return -EIO;
-
- do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
-
- err = create_gang(engine, &rq);
- if (err)
- break;
-
- /* Submit each spinner at increasing priority */
- engine->schedule(rq, &attr);
- } while (prio <= I915_PRIORITY_MAX &&
- !__igt_timeout(end_time, NULL));
- pr_debug("%s: Preempt chain of %d requests\n",
- engine->name, prio);
-
-		/*
-		 * The last spinner thus has the highest priority and should
-		 * execute first. When it completes, it terminates the next
-		 * lowest spinner, and so on, until there are no more
-		 * spinners and the gang is complete.
-		 */
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
- if (!IS_ERR(cs)) {
- *cs = 0;
- i915_gem_object_unpin_map(rq->batch->obj);
- } else {
- err = PTR_ERR(cs);
- intel_gt_set_wedged(gt);
- }
-
- while (rq) { /* wait for each rq from highest to lowest prio */
- struct i915_request *n = list_next_entry(rq, mock.link);
-
- if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(engine->i915->drm.dev);
-
- pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -ETIME;
- }
-
- i915_vma_put(rq->batch);
- i915_request_put(rq);
- rq = n;
- }
-
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_vma *
-create_gpr_user(struct intel_engine_cs *engine,
- struct i915_vma *result,
- unsigned int offset)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *cs;
- int err;
- int i;
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, result->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- i915_vma_put(vma);
- return ERR_CAST(cs);
- }
-
-	/* All GPRs are clear for new contexts. We use GPR(0) as the constant 1 */
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, 0);
- *cs++ = 1;
-
- for (i = 1; i < NUM_GPR; i++) {
- u64 addr;
-
- /*
- * Perform: GPR[i]++
- *
- * As we read and write into the context saved GPR[i], if
- * we restart this batch buffer from an earlier point, we
- * will repeat the increment and store a value > 1.
- */
- *cs++ = MI_MATH(4);
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
- *cs++ = MI_MATH_ADD;
- *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
-
- addr = result->node.start + offset + i * sizeof(*cs);
- *cs++ = MI_STORE_REGISTER_MEM_GEN8;
- *cs++ = CS_GPR(engine, 2 * i);
- *cs++ = lower_32_bits(addr);
- *cs++ = upper_32_bits(addr);
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- return vma;
-}
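-
-/*
- * What each create_gpr_user() batch computes, as a CPU-side sketch (gpr[]
- * stands in for the context-saved CS_GPR registers). Should a preemption
- * ever restart the batch from an earlier arbitration point, an increment
- * would be replayed and some result would exceed 1.
- */
-static void gpr_user_on_cpu(u32 *gpr, u32 *result, const u32 *semaphore)
-{
-	int i;
-
-	gpr[0] = 1;			/* MI_LOAD_REGISTER_IMM */
-	for (i = 1; i < NUM_GPR; i++) {
-		gpr[i] += gpr[0];	/* MI_MATH: GPR[i]++ */
-		result[i] = gpr[i];	/* MI_STORE_REGISTER_MEM */
-		while (READ_ONCE(*semaphore) < i)
-			cpu_relax();	/* MI_SEMAPHORE_SAD_GTE_SDD */
-	}
-}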
-
-static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, sz);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, 0);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-static struct i915_request *
-create_gpr_client(struct intel_engine_cs *engine,
- struct i915_vma *global,
- unsigned int offset)
-{
- struct i915_vma *batch, *vma;
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- vma = i915_vma_instance(global->obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_ce;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_ce;
-
- batch = create_gpr_user(engine, vma, offset);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_vma;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_batch;
- }
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
-
- i915_vma_lock(batch);
- if (!err)
- err = i915_request_await_object(rq, batch->obj, false);
- if (!err)
- err = i915_vma_move_to_active(batch, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- batch->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(batch);
- i915_vma_unpin(batch);
-
- if (!err)
- i915_request_get(rq);
- i915_request_add(rq);
-
-out_batch:
- i915_vma_put(batch);
-out_vma:
- i915_vma_unpin(vma);
-out_ce:
- intel_context_put(ce);
- return err ? ERR_PTR(err) : rq;
-}
-
-static int preempt_user(struct intel_engine_cs *engine,
- struct i915_vma *global,
- int id)
-{
- struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_MAX
- };
- struct i915_request *rq;
- int err = 0;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(global);
- *cs++ = 0;
- *cs++ = id;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- engine->schedule(rq, &attr);
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0)
- err = -ETIME;
- i915_request_put(rq);
-
- return err;
-}
-
-static int live_preempt_user(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_vma *global;
- enum intel_engine_id id;
- u32 *result;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * In our other tests, we look at preemption in carefully
- * controlled conditions in the ringbuffer. Since most of the
- * time is spent in user batches, most of our preemptions naturally
- * occur there. We want to verify that when we preempt inside a batch
- * we continue on from the current instruction and do not roll back
- * to the start, or another earlier arbitration point.
- *
- * To verify this, we create a batch which is a mixture of
- * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
- * a few preempting contexts thrown into the mix, we look for any
- * repeated instructions (which show up as incorrect values).
- */
-
- global = create_global(gt, 4096);
- if (IS_ERR(global))
- return PTR_ERR(global);
-
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
- if (IS_ERR(result)) {
- i915_vma_unpin_and_release(&global, 0);
- return PTR_ERR(result);
- }
-
- for_each_engine(engine, gt, id) {
- struct i915_request *client[3] = {};
- struct igt_live_test t;
- int i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
- continue; /* we need per-context GPR */
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- memset(result, 0, 4096);
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *rq;
-
- rq = create_gpr_client(engine, global,
- NUM_GPR * i * sizeof(u32));
-			if (IS_ERR(rq)) {
-				err = PTR_ERR(rq);
-				goto end_test;
-			}
-
- client[i] = rq;
- }
-
- /* Continuously preempt the set of 3 running contexts */
- for (i = 1; i <= NUM_GPR; i++) {
- err = preempt_user(engine, global, i);
- if (err)
- goto end_test;
- }
-
- if (READ_ONCE(result[0]) != NUM_GPR) {
- pr_err("%s: Failed to release semaphore\n",
- engine->name);
- err = -EIO;
- goto end_test;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- int gpr;
-
- if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
- err = -ETIME;
- goto end_test;
- }
-
- for (gpr = 1; gpr < NUM_GPR; gpr++) {
- if (result[NUM_GPR * i + gpr] != 1) {
- pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
- engine->name,
- i, gpr, result[NUM_GPR * i + gpr]);
- err = -EINVAL;
- goto end_test;
- }
- }
- }
-
-end_test:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i])
- break;
-
- i915_request_put(client[i]);
- }
-
- /* Flush the semaphores on error */
- smp_store_mb(result[0], -1);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
- return err;
-}
-
-static int live_preempt_timeout(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Check that we force preemption to occur by cancelling the previous
- * context if it refuses to yield the GPU.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- unsigned long saved_timeout;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- /* Flush the previous CS ack before changing timeouts */
- while (READ_ONCE(engine->execlists.pending[0]))
- cpu_relax();
-
- saved_timeout = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- intel_engine_flush_submission(engine);
- engine->props.preempt_timeout_ms = saved_timeout;
-
- if (i915_request_wait(rq, 0, HZ / 10) < 0) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_lo);
- i915_request_put(rq);
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
- return err;
-}
-
-static int random_range(struct rnd_state *rnd, int min, int max)
-{
- return i915_prandom_u32_max_state(max - min, rnd) + min;
-}
-
-static int random_priority(struct rnd_state *rnd)
-{
- return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
-}
-
-struct preempt_smoke {
- struct intel_gt *gt;
- struct i915_gem_context **contexts;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *batch;
- unsigned int ncontext;
- struct rnd_state prng;
- unsigned long count;
-};
-
-static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
-{
- return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
- &smoke->prng)];
-}
-
-static int smoke_submit(struct preempt_smoke *smoke,
- struct i915_gem_context *ctx, int prio,
- struct drm_i915_gem_object *batch)
-{
- struct i915_request *rq;
- struct i915_vma *vma = NULL;
- int err = 0;
-
- if (batch) {
- struct i915_address_space *vm;
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(batch, vm, NULL);
- i915_vm_put(vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
- }
-
- ctx->sched.priority = prio;
-
- rq = igt_request_alloc(ctx, smoke->engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto unpin;
- }
-
- if (vma) {
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- }
-
- i915_request_add(rq);
-
-unpin:
- if (vma)
- i915_vma_unpin(vma);
-
- return err;
-}
-
-static int smoke_crescendo_thread(void *arg)
-{
- struct preempt_smoke *smoke = arg;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, count % I915_PRIORITY_MAX,
- smoke->batch);
- if (err)
- return err;
-
- count++;
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- smoke->count = count;
- return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
- struct task_struct *tsk[I915_NUM_ENGINES] = {};
- struct preempt_smoke arg[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned long count;
- int err = 0;
-
- for_each_engine(engine, smoke->gt, id) {
- arg[id] = *smoke;
- arg[id].engine = engine;
- if (!(flags & BATCH))
- arg[id].batch = NULL;
- arg[id].count = 0;
-
-		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
- "igt/smoke:%d", id);
- if (IS_ERR(tsk[id])) {
- err = PTR_ERR(tsk[id]);
- break;
- }
- get_task_struct(tsk[id]);
- }
-
- yield(); /* start all threads before we kthread_stop() */
-
- count = 0;
- for_each_engine(engine, smoke->gt, id) {
- int status;
-
- if (IS_ERR_OR_NULL(tsk[id]))
- continue;
-
- status = kthread_stop(tsk[id]);
- if (status && !err)
- err = status;
-
- count += arg[id].count;
-
- put_task_struct(tsk[id]);
- }
-
- pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
-	return err;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
- enum intel_engine_id id;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- for_each_engine(smoke->engine, smoke->gt, id) {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, random_priority(&smoke->prng),
- flags & BATCH ? smoke->batch : NULL);
- if (err)
- return err;
-
- count++;
- }
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return 0;
-}
-
-static int live_preempt_smoke(void *arg)
-{
- struct preempt_smoke smoke = {
- .gt = arg,
- .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 256,
- };
- const unsigned int phase[] = { 0, BATCH };
- struct igt_live_test t;
- int err = -ENOMEM;
- u32 *cs;
- int n;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
- return 0;
-
- smoke.contexts = kmalloc_array(smoke.ncontext,
- sizeof(*smoke.contexts),
- GFP_KERNEL);
- if (!smoke.contexts)
- return -ENOMEM;
-
- smoke.batch =
- i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
- if (IS_ERR(smoke.batch)) {
- err = PTR_ERR(smoke.batch);
- goto err_free;
- }
-
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err_batch;
- }
- for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
- cs[n] = MI_ARB_CHECK;
- cs[n] = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(smoke.batch);
- i915_gem_object_unpin_map(smoke.batch);
-
- if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
- err = -EIO;
- goto err_batch;
- }
-
- for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.gt->i915);
- if (!smoke.contexts[n])
- goto err_ctx;
- }
-
- for (n = 0; n < ARRAY_SIZE(phase); n++) {
- err = smoke_crescendo(&smoke, phase[n]);
- if (err)
- goto err_ctx;
-
- err = smoke_random(&smoke, phase[n]);
- if (err)
- goto err_ctx;
- }
-
-err_ctx:
- if (igt_live_test_end(&t))
- err = -EIO;
-
- for (n = 0; n < smoke.ncontext; n++) {
- if (!smoke.contexts[n])
- break;
- kernel_context_close(smoke.contexts[n]);
- }
-
-err_batch:
- i915_gem_object_put(smoke.batch);
-err_free:
- kfree(smoke.contexts);
-
- return err;
-}
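-
-/*
- * The smoke batch built in live_preempt_smoke() is nothing but a page of
- * arbitration points, so a preemption may take effect between any two
- * instructions. A sketch of the layout written by the pin_map loop:
- */
-static void fill_arb_batch(u32 *cs, unsigned int len_dw)
-{
-	unsigned int n;
-
-	for (n = 0; n < len_dw - 1; n++)
-		cs[n] = MI_ARB_CHECK; /* one preemption point per dword */
-	cs[n] = MI_BATCH_BUFFER_END;
-}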
-
-static int nop_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int nctx,
- unsigned int flags)
-#define CHAIN BIT(0)
-{
- IGT_TIMEOUT(end_time);
- struct i915_request *request[16] = {};
- struct intel_context *ve[16];
- unsigned long n, prime, nc;
- struct igt_live_test t;
- ktime_t times[2] = {};
- int err;
-
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
-
- for (n = 0; n < nctx; n++) {
- ve[n] = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve[n])) {
- err = PTR_ERR(ve[n]);
- nctx = n;
- goto out;
- }
-
- err = intel_context_pin(ve[n]);
- if (err) {
- intel_context_put(ve[n]);
- nctx = n;
- goto out;
- }
- }
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
- if (err)
- goto out;
-
- for_each_prime_number_from(prime, 1, 8192) {
- times[1] = ktime_get_raw();
-
- if (flags & CHAIN) {
- for (nc = 0; nc < nctx; nc++) {
- for (n = 0; n < prime; n++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- } else {
- for (n = 0; n < prime; n++) {
- for (nc = 0; nc < nctx; nc++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- }
-
- for (nc = 0; nc < nctx; nc++) {
- if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- break;
- }
- }
-
- times[1] = ktime_sub(ktime_get_raw(), times[1]);
- if (prime == 1)
- times[0] = times[1];
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- request[nc] = NULL;
- }
-
- if (__igt_timeout(end_time, NULL))
- break;
- }
-
- err = igt_live_test_end(&t);
- if (err)
- goto out;
-
-	pr_info("Request x%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
- nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
- prime, div64_u64(ktime_to_ns(times[1]), prime));
-
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- intel_context_unpin(ve[nc]);
- intel_context_put(ve[nc]);
- }
- return err;
-}
-
-static unsigned int
-__select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- bool (*filter)(const struct intel_engine_cs *))
-{
- unsigned int n = 0;
- unsigned int inst;
-
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- if (filter && !filter(gt->engine_class[class][inst]))
- continue;
-
- siblings[n++] = gt->engine_class[class][inst];
- }
-
- return n;
-}
-
-static unsigned int
-select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings)
-{
- return __select_siblings(gt, class, siblings, NULL);
-}
-
-static int live_virtual_engine(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for_each_engine(engine, gt, id) {
- err = nop_virtual_engine(gt, &engine, 1, 1, 0);
- if (err) {
- pr_err("Failed to wrap engine %s: err=%d\n",
- engine->name, err);
- return err;
- }
- }
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, n;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(gt, siblings, nsibling,
- n, 0);
- if (err)
- return err;
- }
-
- err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mask_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct intel_context *ve;
- struct igt_live_test t;
- unsigned int n;
- int err;
-
- /*
- * Check that by setting the execution mask on a request, we can
- * restrict it to our desired engine within the virtual engine.
- */
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_close;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < nsibling; n++) {
- request[n] = i915_request_create(ve);
- if (IS_ERR(request[n])) {
- err = PTR_ERR(request[n]);
- nsibling = n;
- goto out;
- }
-
- /* Reverse order as it's more likely to be unnatural */
- request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
-
- i915_request_get(request[n]);
- i915_request_add(request[n]);
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out;
- }
-
- if (request[n]->engine != siblings[nsibling - n - 1]) {
- pr_err("Executed on wrong sibling '%s', expected '%s'\n",
- request[n]->engine->name,
- siblings[nsibling - n - 1]->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- err = igt_live_test_end(&t);
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (n = 0; n < nsibling; n++)
- i915_request_put(request[n]);
-
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_close:
- return err;
-}
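-
-/*
- * The knob exercised by mask_virtual_engine(): execution_mask is a bitmask
- * of the physical engines on which the request may run, so setting it to a
- * single sibling's mask pins the request to that engine. A minimal sketch:
- */
-static void pin_to_sibling(struct i915_request *rq,
-			   const struct intel_engine_cs *sibling)
-{
-	rq->execution_mask = sibling->mask; /* one bit -> one engine */
-}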
-
-static int live_virtual_mask(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = mask_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int slicein_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must take part in timeslicing on the target engines.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for (n = 0; n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
- __func__, rq->engine->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int sliceout_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must allow others a fair timeslice.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- /* XXX We do not handle oversubscription and fairness with normal rq */
- for (n = 0; n < nsibling; n++) {
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- for (n = 0; !err && n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
- __func__, siblings[n]->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
- }
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_slice(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = __select_siblings(gt, class, siblings,
- intel_engine_has_timeslices);
- if (nsibling < 2)
- continue;
-
- err = slicein_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
-
- err = sliceout_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int preserved_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *last = NULL;
- struct intel_context *ve;
- struct i915_vma *scratch;
- struct igt_live_test t;
- unsigned int n;
- int err = 0;
- u32 *cs;
-
- scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- err = i915_vma_sync(scratch);
- if (err)
- goto out_scratch;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_scratch;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- struct intel_engine_cs *engine = siblings[n % nsibling];
- struct i915_request *rq;
-
- rq = i915_request_create(ve);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_end;
- }
-
- i915_request_put(last);
- last = i915_request_get(rq);
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
- *cs++ = n + 1;
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* Restrict this request to run on a particular engine */
- rq->execution_mask = engine->mask;
- i915_request_add(rq);
- }
-
- if (i915_request_wait(last, 0, HZ / 5) < 0) {
- err = -ETIME;
- goto out_end;
- }
-
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- if (cs[n] != n) {
- pr_err("Incorrect value[%d] found for GPR[%d]\n",
- cs[n], n);
- err = -EINVAL;
- break;
- }
- }
-
- i915_gem_object_unpin_map(scratch->obj);
-
-out_end:
- if (igt_live_test_end(&t))
- err = -EIO;
- i915_request_put(last);
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_scratch:
- i915_vma_unpin_and_release(&scratch, 0);
- return err;
-}
-
-static int live_virtual_preserved(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that the context image retains non-privileged (user) registers
- * from one engine to the next. For this we check that the CS_GPR
- * are preserved.
- */
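-
- /*
-  * preserved_virtual_engine() chains requests across the siblings:
-  * each stores CS_GPR[n] to scratch and then seeds the next GPR with
-  * n + 1, so the readback only matches if the GPR values survive
-  * every migration between engines.
-  */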
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- /* As we use CS_GPR, we cannot run on gens before they existed on all engines. */
- if (INTEL_GEN(gt->i915) < 9)
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = preserved_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int bond_virtual_engine(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
- struct intel_engine_cs *master;
- struct i915_request *rq[16];
- enum intel_engine_id id;
- struct igt_spinner spin;
- unsigned long n;
- int err;
-
- /*
- * A set of bonded requests is intended to be run concurrently
- * across a number of engines. We use one request per-engine
- * and a magic fence to schedule each of the bonded requests
- * at the same time. A consequence of our current scheduler is that
- * we only move requests to the HW ready queue when the request
- * becomes ready, that is when all of its prerequisite fences have
- * been signaled. As one of those fences is the master submit fence,
- * there is a delay on all secondary fences as the HW may be
- * currently busy. Equally, as all the requests are independent,
- * they may have other fences that delay individual request
- * submission to HW. Ergo, we do not guarantee that all requests are
- * immediately submitted to HW at the same time, just that if the
- * rules are abided by, they are ready at the same time as the
- * first is submitted. Userspace can embed semaphores in its batch
- * to ensure parallel execution of its phases as it requires.
- * Naturally, it has been requested that the scheduler should take
- * care of parallel execution, even across preemption events on
- * different HW. (The proper answer is of course "lalalala".)
- *
- * With the submit-fence, we have identified three possible phases
- * of synchronisation depending on the master fence: queued (not
- * ready), executing, and signaled. The first two are quite simple
- * and checked below. However, the signaled master fence handling is
- * contentious. Currently we do not distinguish between a signaled
- * fence and an expired fence, as once signaled it does not convey
- * any information about the previous execution. It may even be freed,
- * and so may no longer exist by the time we check. Ergo we currently
- * do not apply the bonding constraint for an already signaled fence,
- * as our expectation is that it should not constrain the secondaries
- * and is outside of the scope of the bonded request API (i.e. all
- * userspace requests are meant to be running in parallel). As
- * it imposes no constraint, and is effectively a no-op, we do not
- * check below as normal execution flows are checked extensively above.
- *
- * XXX Is the degenerate handling of signaled submit fences the
- * expected behaviour for userspace?
- */
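-
- /*
-  * Sketch of the bonding await exercised below (simplified, error
-  * handling omitted):
-  *
-  *   err = i915_request_await_execution(secondary,
-  *                                      &master->fence,
-  *                                      ve->engine->bond_execute);
-  *
-  * bond_execute is the callback that, once the master begins
-  * execution, narrows the secondary's execution to the sibling
-  * bonded to the master's engine.
-  */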
-
- GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- err = 0;
- rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, gt, id) {
- struct i915_sw_fence fence = {};
- struct intel_context *ce;
-
- if (master->class == class)
- continue;
-
- ce = intel_context_create(master);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
- rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
- intel_context_put(ce);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto out;
- }
- i915_request_get(rq[0]);
-
- if (flags & BOND_SCHEDULE) {
- onstack_fence_init(&fence);
- err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
- &fence,
- GFP_KERNEL);
- }
-
- i915_request_add(rq[0]);
- if (err < 0)
- goto out;
-
- if (!(flags & BOND_SCHEDULE) &&
- !igt_wait_for_spinner(&spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- struct intel_context *ve;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_virtual_engine_attach_bond(ve->engine,
- master,
- siblings[n]);
- if (err) {
- intel_context_put(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_context_pin(ve);
- intel_context_put(ve);
- if (err) {
- onstack_fence_fini(&fence);
- goto out;
- }
-
- rq[n + 1] = i915_request_create(ve);
- intel_context_unpin(ve);
- if (IS_ERR(rq[n + 1])) {
- err = PTR_ERR(rq[n + 1]);
- onstack_fence_fini(&fence);
- goto out;
- }
- i915_request_get(rq[n + 1]);
-
- err = i915_request_await_execution(rq[n + 1],
- &rq[0]->fence,
- ve->engine->bond_execute);
- i915_request_add(rq[n + 1]);
- if (err < 0) {
- onstack_fence_fini(&fence);
- goto out;
- }
- }
- onstack_fence_fini(&fence);
- intel_engine_flush_submission(master);
- igt_spinner_end(&spin);
-
- if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
- pr_err("Master request did not execute (on %s)!\n",
- rq[0]->engine->name);
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(rq[n + 1], 0,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq[n + 1]->engine != siblings[n]) {
- pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
- siblings[n]->name,
- rq[n + 1]->engine->name,
- rq[0]->engine->name);
- err = -EINVAL;
- goto out;
- }
- }
-
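- /*
-  * rq[] is terminated by its first ERR_PTR entry (see memset_p
-  * above); after releasing, reset rq[0] to mark it empty for the
-  * next master engine.
-  */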
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- rq[0] = ERR_PTR(-ENOMEM);
- }
-
-out:
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_bond(void *arg)
-{
- static const struct phase {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "", 0 },
- { "schedule", BOND_SCHEDULE },
- { },
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- const struct phase *p;
- int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (p = phases; p->name; p++) {
- err = bond_virtual_engine(gt,
- class, siblings, nsibling,
- p->flags);
- if (err) {
- pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
- __func__, p->name, class, nsibling, err);
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static int reset_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct intel_engine_cs *engine;
- struct intel_context *ve;
- struct igt_spinner spin;
- struct i915_request *rq;
- unsigned int n;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendants are not executed while the capture is in progress.
- */
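-
- /*
-  * Flow exercised below (sketch): stop the tasklet, unwind the
-  * spinner as if a preemption had failed, execlists_hold() it across
-  * the engine reset, then execlists_unhold() and verify that the
-  * request is only resubmitted on release.
-  */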
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_spin;
- }
-
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_disable(siblings[n]);
-
- rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out_heartbeat;
- }
-
- engine = rq->engine;
- GEM_BUG_ON(engine == ve->engine);
-
- /* Take ownership of the reset and tasklet */
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Fake a preemption event; failed of course */
- spin_lock_irq(&engine->active.lock);
- __unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->active.lock);
- GEM_BUG_ON(rq->engine != ve->engine);
-
- /* Reset the engine while keeping our active request on hold */
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- /* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- i915_request_get(rq);
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_rq;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
-
-out_rq:
- i915_request_put(rq);
-out_heartbeat:
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_enable(siblings[n]);
-
- intel_context_put(ve);
-out_spin:
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that we handle a reset event within a virtual engine.
- * Only the physical engine is reset, but we have to check the flow
- * of the virtual requests around the reset, and make sure it is not
- * forgotten.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = reset_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-int intel_execlists_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_sanitycheck),
- SUBTEST(live_unlite_switch),
- SUBTEST(live_unlite_preempt),
- SUBTEST(live_unlite_ring),
- SUBTEST(live_pin_rewind),
- SUBTEST(live_hold_reset),
- SUBTEST(live_error_interrupt),
- SUBTEST(live_timeslice_preempt),
- SUBTEST(live_timeslice_rewind),
- SUBTEST(live_timeslice_queue),
- SUBTEST(live_timeslice_nopreempt),
- SUBTEST(live_busywait_preempt),
- SUBTEST(live_preempt),
- SUBTEST(live_late_preempt),
- SUBTEST(live_nopreempt),
- SUBTEST(live_preempt_cancel),
- SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_chain_preempt),
- SUBTEST(live_preempt_ring),
- SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_timeout),
- SUBTEST(live_preempt_user),
- SUBTEST(live_preempt_smoke),
- SUBTEST(live_virtual_engine),
- SUBTEST(live_virtual_mask),
- SUBTEST(live_virtual_preserved),
- SUBTEST(live_virtual_slice),
- SUBTEST(live_virtual_bond),
- SUBTEST(live_virtual_reset),
- };
-
- if (!HAS_EXECLISTS(i915))
- return 0;
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
if (!lrc)
return -ENOMEM;
+ GEM_BUG_ON(offset_in_page(lrc));
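+ /*
+  * Note: __get_free_page() is guaranteed page aligned, whereas a
+  * kmalloc(PAGE_SIZE) allocation is not on all configurations
+  * (e.g. with slab debugging enabled), hence the assertion above.
+  */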
err = 0;
for_each_engine(engine, gt, id) {
@@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
- execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
- engine->kernel_context,
- engine,
- engine->kernel_context->ring,
- true);
+ __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context, engine, true);
dw = 0;
do {
- u32 lri = hw[dw];
+ u32 lri = READ_ONCE(hw[dw]);
if (lri == 0) {
dw++;
@@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg)
dw++;
while (lri) {
- if (hw[dw] != lrc[dw]) {
+ u32 offset = READ_ONCE(hw[dw]);
+
+ if (offset != lrc[dw]) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
- engine->name, dw, hw[dw], lrc[dw]);
+ engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
break;
}
@@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg)
dw += 2;
lri -= 2;
}
- } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
if (err) {
pr_info("%s: HW register image:\n", engine->name);
@@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(lrc);
+ free_page((unsigned long)lrc);
return err;
}
@@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
err = emit_semaphore_signal(engine->kernel_context, slot);
if (err)
goto err_rq;
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
} else {
slot[0] = 1;
wmb();
@@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (test_and_set_bit(bit, lock))
- return;
-
- tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->execlists.tasklet);
- if (!rq->fence.error)
- intel_engine_reset(engine, NULL);
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
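+
+ /*
+  * Assumption noted: __intel_engine_reset_bh() is the variant that
+  * expects the caller to have disabled bottom halves, hence the
+  * local_bh_disable()/local_bh_enable() pairing above.
+  */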
}
static struct i915_request *garbage(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index b25eba50c88e..cf373c72359e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -5,6 +5,7 @@
*/
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"
#include "gem/selftests/mock_context.h"
@@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
return err;
}
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = table;
- arg->scratch = create_scratch(gt);
+ arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg)
static int read_regs(struct i915_request *rq,
u32 addr, unsigned int count,
- uint32_t *offset)
+ u32 *offset)
{
unsigned int i;
u32 *cs;
@@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq,
static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr;
@@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq,
static int read_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq,
static int check_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
unsigned int i;
u32 expect;
@@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
static int check_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce,
static int __live_mocs_reset(struct live_mocs *mocs,
struct intel_context *ce)
{
+ struct intel_gt *gt = ce->engine->gt;
int err;
- err = intel_engine_reset(ce->engine, "mocs");
- if (err)
- return err;
+ if (intel_has_reset_engine(gt)) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
- err = active_engine_reset(ce, "mocs");
- if (err)
- return err;
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
- intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+ if (intel_has_gpu_reset(gt)) {
+ intel_gt_reset(gt, ce->engine->mask, "mocs");
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg)
/* Check the mocs setup is retained over per-engine and global resets */
- if (!intel_has_reset_engine(gt))
- return 0;
-
err = live_mocs_init(&mocs, gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 64ef5ee5decf..61abc0556601 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -6,6 +6,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index ef5aeebbeeb0..8784257ec808 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,6 +9,7 @@
#include "i915_memcpy.h"
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
@@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt,
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1))
- memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+ memset_io(s, STACK_MAGIC, PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
crc[page] = crc32_le(0, in, PAGE_SIZE);
@@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->error_capture.start,
PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
x = crc32_le(0, in, PAGE_SIZE);
@@ -320,17 +321,25 @@ static int igt_atomic_engine_reset(void *arg)
goto out_unlock;
for_each_engine(engine, gt, id) {
- tasklet_disable(&engine->execlists.tasklet);
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (t->func)
+ tasklet_disable(t);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
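+ /*
+  * The "softirq" phase already runs with bottom halves disabled
+  * in its critical section, so only the other atomic phases need
+  * the explicit pairing around __intel_engine_reset_bh().
+  */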
+
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
@@ -339,7 +348,10 @@ static int igt_atomic_engine_reset(void *arg)
}
intel_engine_pm_put(engine);
- tasklet_enable(&engine->execlists.tasklet);
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index aa5675ecb5cc..967641fee42a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq)
{
mutex_lock(&rps->lock);
GEM_BUG_ON(!intel_rps_is_active(rps));
- intel_rps_set(rps, freq);
+ if (wait_for(!intel_rps_set(rps, freq), 50)) {
+ mutex_unlock(&rps->lock);
+ return 0;
+ }
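+ /*
+  * Note: intel_rps_set() may not take effect immediately, so we poll
+  * it for up to 50ms; returning 0 above signals the caller that the
+  * requested frequency was never applied.
+  */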
GEM_BUG_ON(rps->last_freq != freq);
mutex_unlock(&rps->lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 2edf2b15885f..6f3a3687ef0f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -9,6 +9,7 @@
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
@@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg)
}
count++;
- if (8 * watcher[1].rq->ring->emit >
- 3 * watcher[1].rq->ring->size) {
- i915_request_put(rq);
- break;
- }
-
/* Flush the timeline before manually wrapping again */
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
@@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg)
i915_request_put(rq);
goto out;
}
-
retire_requests(tl);
i915_request_put(rq);
+
+ /* Single requests are limited to half a ring at most */
+ if (8 * watcher[1].rq->ring->emit >
+ 3 * watcher[1].rq->ring->size)
+ break;
+
} while (!__igt_timeout(end_time, NULL));
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 61a0532d0f3d..2070b91cb607 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
}
static struct drm_i915_gem_object *
-read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+read_nonprivs(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
struct i915_request *rq;
@@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results)
}
}
-static int check_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_whitelist(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct drm_i915_gem_object *results;
struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
- results = read_nonprivs(ctx, engine);
+ results = read_nonprivs(ce);
if (IS_ERR(results))
return PTR_ERR(results);
@@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
const char *name)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx, *tmp;
+ struct intel_context *ce, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
@@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
engine->whitelist.count, engine->name, name);
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
err = igt_spinner_init(&spin, engine->gt);
if (err)
goto out_ctx;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out_spin;
@@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out_spin;
}
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
goto out_spin;
}
- tmp = kernel_context(i915);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto out_spin;
}
- kernel_context_close(ctx);
- ctx = tmp;
+ intel_context_put(ce);
+ ce = tmp;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
out_spin:
igt_spinner_fini(&spin);
out_ctx:
- kernel_context_close(ctx);
+ intel_context_put(ce);
return err;
}
@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
- int err = 0, i, v;
+ int err = 0, i, v, sz;
u32 *cs, *results;
- scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
+ sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+ scratch = __vm_create_scratch_for_read(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -786,15 +787,15 @@ out:
return err;
}
-static int read_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int read_whitelisted_registers(struct intel_context *ce,
struct i915_vma *results)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
int i, err = 0;
u32 srm, *cs;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -834,18 +835,15 @@ err_req:
return request_add_sync(rq, err);
}
-static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int scrub_whitelisted_registers(struct intel_context *ce)
{
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- vm = i915_gem_context_get_vm_rcu(ctx);
- batch = create_batch(vm);
- i915_vm_put(vm);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct {
- struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
} client[2] = {};
struct intel_engine_cs *engine;
@@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg)
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_address_space *vm;
- struct i915_gem_context *c;
-
- c = kernel_context(gt->i915);
- if (IS_ERR(c)) {
- err = PTR_ERR(c);
- goto err;
- }
-
- vm = i915_gem_context_get_vm_rcu(c);
-
- client[i].scratch[0] = create_scratch(vm, 1024);
+ client[i].scratch[0] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(vm, 1024);
+ client[i].scratch[1] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
-
- client[i].ctx = c;
- i915_vm_put(vm);
}
for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2];
+
if (!engine->kernel_context->vm)
continue;
if (!whitelist_writable_count(engine))
continue;
+ ce[0] = intel_context_create(engine);
+ if (IS_ERR(ce[0])) {
+ err = PTR_ERR(ce[0]);
+ break;
+ }
+ ce[1] = intel_context_create(engine);
+ if (IS_ERR(ce[1])) {
+ err = PTR_ERR(ce[1]);
+ intel_context_put(ce[0]);
+ break;
+ }
+
/* Read default values */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[0]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Try to overwrite registers (should only affect ctx0) */
- err = scrub_whitelisted_registers(client[0].ctx, engine);
+ err = scrub_whitelisted_registers(ce[0]);
if (err)
- goto err;
+ goto err_ce;
/* Read values from ctx1, we expect these to be defaults */
- err = read_whitelisted_registers(client[1].ctx, engine,
- client[1].scratch[0]);
+ err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Verify that both reads return the same default values */
err = check_whitelisted_registers(engine,
@@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg)
client[1].scratch[0],
result_eq);
if (err)
- goto err;
+ goto err_ce;
/* Read back the updated values in ctx0 */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[1]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
if (err)
- goto err;
+ goto err_ce;
/* User should be granted privilege to overwrite regs */
err = check_whitelisted_registers(engine,
client[0].scratch[0],
client[0].scratch[1],
result_neq);
+err_ce:
+ intel_context_put(ce[1]);
+ intel_context_put(ce[0]);
if (err)
- goto err;
+ break;
}
err:
for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i].ctx)
- break;
-
i915_vma_unpin_and_release(&client[i].scratch[1], 0);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- kernel_context_close(client[i].ctx);
}
if (igt_flush_test(gt->i915))
@@ -1128,18 +1119,21 @@ err:
}
static bool
-verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
const char *str)
{
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
- for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
- enum intel_engine_id id = ce->engine->id;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return false;
ok &= engine_wa_list_verify(ce,
&lists->engine[id].wa_list,
@@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= engine_wa_list_verify(ce,
&lists->engine[id].ctx_wa_list,
str) == 0;
+
+ intel_context_put(ce);
}
return ok;
@@ -1157,7 +1153,6 @@ static int
live_gpu_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg)
if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- i915_gem_context_lock_engines(ctx);
-
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(gt);
@@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg)
reference_lists_init(gt, &lists);
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok)
goto out;
intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
- ok = verify_wa_lists(ctx, &lists, "after reset");
+ ok = verify_wa_lists(gt, &lists, "after reset");
out:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
@@ -1200,8 +1187,8 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
@@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(gt, &lists);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_engine(engine, gt, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ break;
+ }
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:idle");
- ok = verify_wa_lists(ctx, &lists, "after idle reset");
+ ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:active");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_wa_lists(ctx, &lists, "after busy reset");
+ ok = verify_wa_lists(gt, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- }
+
err:
- i915_gem_context_unlock_engines(ctx);
+ intel_context_put(ce);
+ if (ret)
+ break;
+ }
+
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
- kernel_context_close(ctx);
igt_flush_test(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 5982b62f913d..a4d8fc9e2374 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
struct file *file;
void *ptr;
- if (obj->ops == &i915_gem_shmem_ops) {
+ if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2a343a977987..4545e90e3bf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 action[] = {
- INTEL_GUC_ACTION_EXIT_S_STATE,
- GUC_POWER_D0,
- };
-
- /*
- * If GuC communication is enabled but submission is not supported,
- * we do not need to resume the GuC but we do need to enable the
- * GuC communication on resume (above).
- */
- if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
- return 0;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ /* XXX: to be implemented with submission interface rework */
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e84ab67b317d..bc2ba7d0626c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,13 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct i915_vma *workqueue;
- void *workqueue_vaddr;
- spinlock_t wq_lock;
-
- struct i915_vma *proc_desc;
- void *proc_desc_vaddr;
-
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 5212ff844292..17526717368c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_lrc.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index f9d0907ea1a5..2270d6b3b272 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
static int guc_wait_ucode(struct intel_uncore *uncore)
{
- struct drm_device *drm = &uncore->i915->drm;
u32 status;
int ret;
@@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
* attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(uncore, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
if (ret) {
- drm_err(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_err(drm, "GuC load failed: status: Reset = %d, "
+ struct drm_device *drm = &uncore->i915->drm;
+
+ drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_dbg(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_err(drm, "GuC firmware signature verification failed\n");
+ drm_dbg(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_err(drm, "GuC firmware exception. EIP: %#x\n",
+ drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f5..23dc0aeaa0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,11 +6,14 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
#include "intel_guc_submission.h"
@@ -54,6 +57,8 @@
*
*/
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
return &base[id];
}
-static int guc_workqueue_create(struct intel_guc *guc)
-{
- return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
- &guc->workqueue_vaddr);
-}
-
-static void guc_workqueue_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static int guc_proc_desc_create(struct intel_guc *guc)
-{
- const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
-
- return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
- &guc->proc_desc_vaddr);
-}
-
-static void guc_proc_desc_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
-}
-
-static void guc_proc_desc_init(struct intel_guc *guc)
-{
- struct guc_process_desc *desc;
-
- desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
-
- /*
- * XXX: pDoorbell and WQVBaseAddress are pointers in process address
- * space for ring3 clients (set them as in mmap_ioctl) or kernel
- * space for kernel clients (map on demand instead? May make debug
- * easier to have it mapped).
- */
- desc->wq_base_addr = 0;
- desc->db_base_addr = 0;
-
- desc->wq_size_bytes = GUC_WQ_SIZE;
- desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-}
-
-static void guc_proc_desc_fini(struct intel_guc *guc)
-{
- memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
-}
-
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
@@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc)
desc->stage_id = 0;
desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
- desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
desc->wq_size = GUC_WQ_SIZE;
}
@@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc)
memset(desc, 0, sizeof(*desc));
}
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc *guc,
- u32 target_engine, u32 context_desc,
- u32 ring_tail, u32 fence_id)
-{
- /* wqi_len is in DWords, and does not include the one-word header */
- const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = guc->proc_desc_vaddr;
- struct guc_wq_item *wqi;
- u32 wq_off;
-
- lockdep_assert_held(&guc->wq_lock);
-
- /* A workqueue item is currently 4 DWs and the workqueue buffer is
- * 2 pages, so a wqi can neither straddle a page boundary nor wrap
- * to the beginning. This simplifies the implementation below.
- *
- * XXX: if that ever changes, we would need to stage the data in a
- * temporary wqi and copy it into the workqueue buffer dw by dw.
- */
- BUILD_BUG_ON(wqi_size != 16);
-
- /* We expect the WQ to be active if we're appending items to it */
- GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
- /* Free space is guaranteed. */
- wq_off = READ_ONCE(desc->tail);
- GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
- GUC_WQ_SIZE) < wqi_size);
- GEM_BUG_ON(wq_off & (wqi_size - 1));
-
- wqi = guc->workqueue_vaddr + wq_off;
-
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
-
- /* Make the update visible to GuC */
- WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
-
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = rq->context->lrc.ccid;
- u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
- guc_wq_item_append(guc, engine->guc_id, ctx_desc,
- ring_tail, rq->fence.seqno);
+ /* Leaving stub as this function will be used in future patches */
}
/*
@@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine,
{
struct intel_guc *guc = &engine->gt->uc.guc;
- spin_lock(&guc->wq_lock);
-
do {
struct i915_request *rq = *out++;
flush_ggtt_writes(rq->ring->vma);
guc_add_request(guc, rq);
} while (out != end);
-
- spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void guc_reset_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ u32 head,
+ bool scrub)
{
- struct i915_request * const *port, *rq;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- /* Note we are only using the inflight and not the pending queue */
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub)
+ lrc_init_regs(ce, engine, true);
- for (port = execlists->active; (rq = *port); port++)
- schedule_out(rq);
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ lrc_update_regs(ce, engine, head);
}
static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- cancel_port_requests(execlists);
-
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
if (!rq)
@@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+ guc_reset_state(rq->context, engine, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->active.lock, flags);
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- cancel_port_requests(execlists);
-
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
i915_request_set_error_once(rq, -EIO);
@@ -497,12 +395,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
}
/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
-/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
@@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- ret = guc_workqueue_create(guc);
- if (ret)
- goto err_pool;
-
- ret = guc_proc_desc_create(guc);
- if (ret)
- goto err_workqueue;
-
- spin_lock_init(&guc->wq_lock);
-
return 0;
-
-err_workqueue:
- guc_workqueue_destroy(guc);
-err_pool:
- guc_stage_desc_pool_destroy(guc);
- return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
if (guc->stage_desc_pool) {
- guc_proc_desc_destroy(guc);
- guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
}
@@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt)
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
-static void guc_set_default_submission(struct intel_engine_cs *engine)
+static int guc_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .alloc = guc_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int guc_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
/*
- * We inherit a bunch of functions from execlists that we'd like
- * to keep using:
- *
- * engine->submit_request = execlists_submit_request;
- * engine->cancel_requests = execlists_cancel_requests;
- * engine->schedule = execlists_schedule;
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += GUC_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= GUC_REQUEST_SIZE;
+ return 0;
+}
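+
+/*
+ * Note the reserve/release pattern above: extra ring space is reserved
+ * up front so that any wait for space happens before we commit to
+ * building the request, and the reservation is dropped again once the
+ * invalidate has been emitted.
+ */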
+
+static inline void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ queue_request(engine, rq, rq_prio(rq));
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&rq->sched.link));
+
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
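+
+/*
+ * Submission flow sketch (simplified): guc_submit_request() runs under
+ * engine->active.lock with irqs saved, queue_request() files the request
+ * on the priolist for its priority, and tasklet_hi_schedule() kicks
+ * guc_submission_tasklet() to dequeue it and hand it to the GuC.
+ */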
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
*
- * But we need to override the actual submission backend in order
- * to talk to the GuC.
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
*/
- intel_execlists_set_default_submission(engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
- /* do not use execlists park/unpark */
- engine->park = engine->unpark = NULL;
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+}
+
+static void start_engine(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
+static int guc_resume(struct intel_engine_cs *engine)
+{
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ setup_hwsp(engine);
+ start_engine(engine);
+
+ return 0;
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = guc_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
/*
* For the breadcrumb irq to work we need the interrupts to stay
@@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_release(struct intel_engine_cs *engine)
{
- struct intel_gt *gt = guc_to_gt(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ tasklet_kill(&engine->execlists.tasklet);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = guc_resume;
+
+ engine->cops = &guc_context_ops;
+ engine->request_alloc = guc_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = guc_set_default_submission;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+ engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+}
+
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
/*
- * We're using GuC work items for submitting work through GuC. Since
- * we're coalescing multiple requests from a single context into a
- * single work item prior to assigning it to execlist_port, we can
- * never have more work items than the total number of ports (for all
- * engines). The GuC firmware is controlling the HEAD of work queue,
- * and it is guaranteed that it will remove the work item from the
- * queue before our request is completed.
+ * The setup relies on several assumptions (e.g. irqs always enabled)
+ * that are only valid on gen11+.
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
- sizeof(struct guc_wq_item) *
- I915_NUM_ENGINES > GUC_WQ_SIZE);
+ GEM_BUG_ON(INTEL_GEN(i915) < 11);
+
+ tasklet_init(&engine->execlists.tasklet,
+ guc_submission_tasklet, (unsigned long)engine);
+
+ guc_default_vfuncs(engine);
+ guc_default_irqs(engine);
- guc_proc_desc_init(guc);
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = guc_sanitize;
+ engine->release = guc_release;
+
+ return 0;
+}
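intel_guc_submission_setup() follows a default-then-override shape: guc_default_vfuncs() installs a complete vtable that works for any engine, and rcs_submission_override() afterwards swaps in the render-specific hooks. A standalone sketch of that shape, with hypothetical names:

#include <stdio.h>

struct engine {
	int engine_class;		/* 0 == render, for this sketch */
	void (*emit_flush)(void);
};

static void flush_xcs(void) { puts("xcs flush"); }
static void flush_rcs(void) { puts("rcs flush"); }

static void default_vfuncs(struct engine *e)
{
	e->emit_flush = flush_xcs;	/* safe default for every class */
}

static void rcs_override(struct engine *e)
{
	e->emit_flush = flush_rcs;	/* render needs the heavier variant */
}

int main(void)
{
	struct engine e = { .engine_class = 0 };

	default_vfuncs(&e);
	if (e.engine_class == 0)
		rcs_override(&e);
	e.emit_flush();			/* prints "rcs flush" */
	return 0;
}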
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(gt);
-
- for_each_engine(engine, gt, id) {
- engine->set_default_submission = guc_set_default_submission;
- engine->set_default_submission(engine);
- }
+ guc_interrupts_capture(guc_to_gt(guc));
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(gt);
guc_stage_desc_fini(guc);
- guc_proc_desc_fini(guc);
}
static bool __guc_submission_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 4cf9d3e50263..5f7b9e6347d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4e6070e95fe9..6abb8f2dc33d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -15,6 +15,29 @@
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (i915->params.enable_guc != -1)
+ return;
+
+ /* Don't enable GuC/HuC on pre-Gen12 */
+ if (INTEL_GEN(i915) < 12) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Don't enable GuC/HuC on older Gen12 platforms */
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Default: enable HuC authentication only */
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+}
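uc_expand_default_options() resolves the tri-state enable_guc modparam: -1 means auto and gets expanded per platform, anything else is an explicit user choice and is left alone. The same decision tree as a standalone sketch (the gen/platform inputs are hypothetical parameters; ENABLE_GUC_LOAD_HUC is bit 1 of the modparam):

#include <assert.h>

#define ENABLE_GUC_LOAD_HUC 2	/* BIT(1), as in the driver's uAPI flags */

static int expand_enable_guc(int param, int gen, int is_tgl_or_rkl)
{
	if (param != -1)
		return param;		/* explicit user choice wins */
	if (gen < 12)
		return 0;		/* pre-Gen12: firmware stays off */
	if (is_tgl_or_rkl)
		return 0;		/* early Gen12 platforms: off too */
	return ENABLE_GUC_LOAD_HUC;	/* default: HuC authentication only */
}

int main(void)
{
	assert(expand_enable_guc(-1, 11, 0) == 0);
	assert(expand_enable_guc(-1, 12, 1) == 0);
	assert(expand_enable_guc(-1, 12, 0) == ENABLE_GUC_LOAD_HUC);
	assert(expand_enable_guc(3, 9, 0) == 3);	/* override preserved */
	return 0;
}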
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -52,9 +75,6 @@ static void __confirm_options(struct intel_uc *uc)
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915->params.enable_guc == -1)
- return;
-
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
@@ -79,8 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC submission is N/A");
- if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "undocumented flag");
@@ -88,6 +107,8 @@ static void __confirm_options(struct intel_uc *uc)
void intel_uc_init_early(struct intel_uc *uc)
{
+ uc_expand_default_options(uc);
+
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
@@ -175,19 +196,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/* we need communication to be enabled to reply to GuC */
GEM_BUG_ON(!guc_communication_enabled(guc));
- if (!guc->mmio_msg)
- return;
-
- spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
- spin_unlock_irq(&i915->irq_lock);
-
- guc->mmio_msg = 0;
+ spin_lock_irq(&guc->irq_lock);
+ if (guc->mmio_msg) {
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ guc->mmio_msg = 0;
+ }
+ spin_unlock_irq(&guc->irq_lock);
}
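The rework above is the classic check-under-lock correction: testing guc->mmio_msg before taking the lock raced with the interrupt path that sets it, so the test and the consume-and-clear now form a single critical section (and the lock moves to the guc-local irq_lock while at it). The same pattern in a userspace sketch with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int mmio_msg;	/* 0 == nothing pending */

/* Producer (stand-in for the interrupt): publish under the lock. */
static void post_msg(unsigned int msg)
{
	pthread_mutex_lock(&irq_lock);
	mmio_msg = msg;
	pthread_mutex_unlock(&irq_lock);
}

/* Consumer: test and clear inside one critical section, never two. */
static void handle_msg(void)
{
	pthread_mutex_lock(&irq_lock);
	if (mmio_msg) {
		printf("processing msg %u\n", mmio_msg);
		mmio_msg = 0;
	}
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	post_msg(42);
	handle_msg();	/* processes once */
	handle_msg();	/* nothing pending, does nothing */
	return 0;
}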
static void guc_reset_interrupts(struct intel_guc *guc)
@@ -207,7 +224,8 @@ static void guc_disable_interrupts(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(guc_communication_enabled(guc));
@@ -227,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 602f1a0bc587..67b06fde1225 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -152,16 +152,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->path = NULL;
}
}
-
- /* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
- uc_fw->path = NULL;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ENABLE_GUC_MASK)
return i915->params.guc_firmware_path;
return "";
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 16b582cb97ed..7fb91de06557 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,11 +37,18 @@
#include <linux/slab.h>
#include "i915_drv.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
+#include "gt/intel_gt_requests.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#define INVALID_OP (~0U)
#define OP_LEN_MI 9
@@ -454,6 +461,7 @@ enum {
RING_BUFFER_INSTRUCTION,
BATCH_BUFFER_INSTRUCTION,
BATCH_BUFFER_2ND_LEVEL,
+ RING_BUFFER_CTX,
};
enum {
@@ -495,6 +503,7 @@ struct parser_exec_state {
*/
int saved_buf_addr_type;
bool is_ctx_wa;
+ bool is_init_ctx;
const struct cmd_info *info;
@@ -708,6 +717,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index)
return *cmd_ptr(s, index);
}
+static inline bool is_init_ctx(struct parser_exec_state *s)
+{
+ return s->buf_type == RING_BUFFER_CTX && s->is_init_ctx;
+}
+
static void parser_exec_state_dump(struct parser_exec_state *s)
{
int cnt = 0;
@@ -721,7 +735,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
- "RING_BUFFER" : "BATCH_BUFFER",
+ "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
+ "CTX_BUFFER" : "BATCH_BUFFER"),
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
@@ -756,7 +771,8 @@ static inline void update_ip_va(struct parser_exec_state *s)
if (WARN_ON(s->ring_head == s->ring_tail))
return;
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
unsigned long ring_top = s->ring_start + s->ring_size;
if (s->ring_head > s->ring_tail) {
@@ -820,68 +836,12 @@ static inline int cmd_length(struct parser_exec_state *s)
*addr = val; \
} while (0)
-static bool is_shadowed_mmio(unsigned int offset)
-{
- bool ret = false;
-
- if ((offset == 0x2168) || /*BB current head register UDW */
- (offset == 0x2140) || /*BB current header register */
- (offset == 0x211c) || /*second BB header register UDW */
- (offset == 0x2114)) { /*second BB header register UDW */
- ret = true;
- }
- return ret;
-}
-
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
-{
- return (offset >= 0x24d0 && offset < 0x2500);
-}
-
-static int force_nonpriv_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index, char *cmd)
-{
- struct intel_gvt *gvt = s->vgpu->gvt;
- unsigned int data;
- u32 ring_base;
- u32 nopid;
-
- if (!strcmp(cmd, "lri"))
- data = cmd_val(s, index + 1);
- else {
- gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
- offset, cmd);
- return -EINVAL;
- }
-
- ring_base = s->engine->mmio_base;
- nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
-
- if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
- data != nopid) {
- gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
- offset, data);
- patch_value(s, cmd_ptr(s, index), nopid);
- return 0;
- }
- return 0;
-}
-
static inline bool is_mocs_mmio(unsigned int offset)
{
return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
((offset >= 0xb020) && (offset <= 0xb0a0));
}
-static int mocs_cmd_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index)
-{
- if (!is_mocs_mmio(offset))
- return -EINVAL;
- vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
- return 0;
-}
-
static int is_cmd_update_pdps(unsigned int offset,
struct parser_exec_state *s)
{
@@ -929,6 +889,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
u32 ctx_sr_ctl;
+ u32 *vreg, vreg_old;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -936,34 +897,101 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
+ if (is_init_ctx(s)) {
+ struct intel_gvt_mmio_info *mmio_info;
+
+ intel_gvt_mmio_set_cmd_accessible(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (mmio_info && mmio_info->write)
+ intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
+ return 0;
+ }
+
if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
}
- if (is_shadowed_mmio(offset)) {
- gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
- return 0;
+ if (!strncmp(cmd, "srm", 3) ||
+ !strncmp(cmd, "lrm", 3)) {
+ if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
+ offset != 0x21f0) {
+ gvt_vgpu_err("%s access to register (%x)\n",
+ cmd, offset);
+ return -EPERM;
+ }
+
+ return 0;
}
- if (is_mocs_mmio(offset) &&
- mocs_cmd_reg_handler(s, offset, index))
- return -EINVAL;
+ if (!strncmp(cmd, "lrr-src", 7) ||
+ !strncmp(cmd, "lrr-dst", 7)) {
+ gvt_vgpu_err("not allowed cmd %s\n", cmd);
+ return -EPERM;
+ }
+
+ if (!strncmp(cmd, "pipe_ctrl", 9)) {
+ /* TODO: add LRI POST logic here */
+ return 0;
+ }
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index, cmd))
+ if (strncmp(cmd, "lri", 3))
return -EPERM;
+ /* below are all lri handlers */
+ vreg = &vgpu_vreg(s->vgpu, offset);
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
+ return -EBADRQC;
+ }
+
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
- if (is_cmd_update_pdps(offset, s) &&
- cmd_pdp_mmio_update_handler(s, offset, index))
- return -EINVAL;
+ if (is_mocs_mmio(offset))
+ *vreg = cmd_val(s, index + 1);
+
+ vreg_old = *vreg;
+
+ if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
+ u32 cmdval_new, cmdval;
+ struct intel_gvt_mmio_info *mmio_info;
+
+ cmdval = cmd_val(s, index + 1);
+
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (!mmio_info) {
+ cmdval_new = cmdval;
+ } else {
+ u64 ro_mask = mmio_info->ro_mask;
+ int ret;
+
+ if (likely(!ro_mask))
+ ret = mmio_info->write(s->vgpu, offset,
+ &cmdval, 4);
+ else {
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
+ ret = -EBADRQC;
+ }
+ if (ret)
+ return ret;
+ cmdval_new = *vreg;
+ }
+ if (cmdval_new != cmdval)
+ patch_value(s, cmd_ptr(s, index + 1), cmdval_new);
+ }
+
+ /* only patch the cmd; restore vreg if the write handler changed it */
+ *vreg = vreg_old;
/* TODO
* In order to let workload with inhibit context to generate
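The save/patch/restore sequence in the hunk above is subtle enough to spell out: the LRI payload is pushed through the register's write handler so device-specific logic can rewrite it, the command stream is patched if the value changed, and vreg is then rolled back, so only the command is modified, never the vGPU's register file. A simplified standalone sketch (the handler and values are hypothetical):

#include <assert.h>
#include <stdint.h>

static uint32_t vreg = 0x1234;	/* the vGPU's shadow register */
static uint32_t cmd_buf[2] = { 0, 0xdead0001 };	/* [1] is the LRI payload */

/* Hypothetical write handler: hardware honours only the low 16 bits. */
static void write_handler(uint32_t val)
{
	vreg = val & 0xffff;
}

static void patch_lri(void)
{
	uint32_t vreg_old = vreg;
	uint32_t cmdval = cmd_buf[1];

	write_handler(cmdval);		/* let the handler massage the value */
	if (vreg != cmdval)
		cmd_buf[1] = vreg;	/* patch only the command stream */
	vreg = vreg_old;		/* the register file stays untouched */
}

int main(void)
{
	patch_lri();
	assert(cmd_buf[1] == 0x0001);	/* command patched to handler output */
	assert(vreg == 0x1234);		/* vreg restored */
	return 0;
}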
@@ -1215,6 +1243,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
s->buf_type = BATCH_BUFFER_INSTRUCTION;
ret = ip_gma_set(s, s->ret_ip_gma_bb);
s->buf_addr_type = s->saved_buf_addr_type;
+ } else if (s->buf_type == RING_BUFFER_CTX) {
+ ret = ip_gma_set(s, s->ring_tail);
} else {
s->buf_type = RING_BUFFER_INSTRUCTION;
s->buf_addr_type = GTT_BUFFER;
@@ -2763,7 +2793,8 @@ static int command_scan(struct parser_exec_state *s,
gma_bottom = rb_start + rb_len;
while (s->ip_gma != gma_tail) {
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_vgpu_err("ip_gma %lx out of ring scope."
@@ -3056,6 +3087,171 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
+/* Generate dummy contexts by sending empty requests to the HW, and let
+ * the HW fill the engine contexts. These dummy contexts are used for
+ * initialization purposes (updating the reg whitelist), so they are
+ * referred to as init contexts here.
+ */
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
+ struct i915_request *rq;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ bool is_ctx_pinned[I915_NUM_ENGINES] = {};
+ int ret;
+
+ if (gvt->is_reg_whitelist_updated)
+ return;
+
+ for_each_engine(engine, &dev_priv->gt, id) {
+ ret = intel_context_pin(s->shadow[id]);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow ctx\n");
+ goto out;
+ }
+ is_ctx_pinned[id] = true;
+
+ rq = i915_request_create(s->shadow[id]);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to alloc default request\n");
+ ret = -EIO;
+ goto out;
+ }
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ if (intel_gt_wait_for_idle(&dev_priv->gt,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* scan init ctx to update cmd accessible list */
+ for_each_engine(engine, &dev_priv->gt, id) {
+ int size = engine->context_size - PAGE_SIZE;
+ void *vaddr;
+ struct parser_exec_state s;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+ obj = rq->context->state->obj;
+
+ if (!obj) {
+ ret = -EIO;
+ goto out;
+ }
+
+ i915_gem_object_set_cache_coherency(obj,
+ I915_CACHE_LLC);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
+ id, PTR_ERR(vaddr));
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = vgpu;
+ s.engine = engine;
+ s.ring_start = 0;
+ s.ring_size = size;
+ s.ring_head = 0;
+ s.ring_tail = size;
+ s.rb_va = vaddr + start;
+ s.workload = NULL;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = true;
+
+ /* skip the first RING_CTX_SIZE bytes, i.e. the 0x50 ring-context dwords */
+ ret = ip_gma_set(&s, RING_CTX_SIZE);
+ if (ret) {
+ i915_gem_object_unpin_map(obj);
+ goto out;
+ }
+
+ ret = command_scan(&s, 0, size, 0, size);
+ if (ret)
+ gvt_err("Scan init ctx error\n");
+
+ i915_gem_object_unpin_map(obj);
+ if (ret)
+ goto out;
+ }
+
+out:
+ if (!ret)
+ gvt->is_reg_whitelist_updated = true;
+
+ for (id = 0; id < I915_NUM_ENGINES; id++) {
+ if (requests[id])
+ i915_request_put(requests[id]);
+
+ if (is_ctx_pinned[id])
+ intel_context_unpin(s->shadow[id]);
+ }
+}
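Note the unwind structure of intel_gvt_update_reg_whitelist(): partial progress is tracked in the requests[] and is_ctx_pinned[] arrays, and the single out: loop releases exactly what was acquired no matter where a failure occurred. The shape in a standalone sketch:

#include <stdio.h>

#define NUM_ENGINES 4

static int pin(int id)    { return id == 2 ? -1 : 0; }	/* engine 2 fails */
static void unpin(int id) { printf("unpin %d\n", id); }

int main(void)
{
	int pinned[NUM_ENGINES] = { 0 };
	int ret = 0, id;

	for (id = 0; id < NUM_ENGINES; id++) {
		ret = pin(id);
		if (ret)
			goto out;	/* stop at the first failure */
		pinned[id] = 1;
	}
out:
	/* Release only what was actually taken, in one place. */
	for (id = 0; id < NUM_ENGINES; id++)
		if (pinned[id])
			unpin(id);
	return ret ? 1 : 0;
}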
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ unsigned long gma_head, gma_tail, gma_start, ctx_size;
+ struct parser_exec_state s;
+ int ring_id = workload->engine->id;
+ struct intel_context *ce = vgpu->submission.shadow[ring_id];
+ int ret;
+
+ GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
+
+ ctx_size = workload->engine->context_size - PAGE_SIZE;
+
+ /* Only the ring context is loaded to HW for an inhibit context; no
+ * need to scan the engine context.
+ */
+ if (is_inhibit_context(ce))
+ return 0;
+
+ gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ gma_head = 0;
+ gma_tail = ctx_size;
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.engine = workload->engine;
+ s.ring_start = gma_start;
+ s.ring_size = ctx_size;
+ s.ring_head = gma_start + gma_head;
+ s.ring_tail = gma_start + gma_tail;
+ s.rb_va = ce->lrc_reg_state;
+ s.workload = workload;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = false;
+
+ /* don't scan the first RING_CTX_SIZE (0x50 dwords), as it's the
+ * ring context
+ */
+ ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, gma_head, gma_tail,
+ gma_start, ctx_size);
+out:
+ if (ret)
+ gvt_vgpu_err("scan shadow ctx error\n");
+
+ return ret;
+}
+
static int init_cmd_table(struct intel_gvt *gvt)
{
unsigned int gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index ab25d151932a..416d345e2816 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -40,6 +40,7 @@
struct intel_gvt;
struct intel_shadow_wa_ctx;
+struct intel_vgpu;
struct intel_vgpu_workload;
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
@@ -50,4 +51,8 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu);
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index d62cd14605a3..84ad74b37d66 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- intel_engine_mask_t engine_mask);
-
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 67b6ede9e707..0daa3931aef7 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,6 +38,10 @@
#include <linux/types.h>
+#include "display/intel_display.h"
+
+struct intel_vgpu;
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
@@ -98,8 +102,6 @@ enum DDI_PORT {
DDI_PORT_E = 4
};
-struct intel_gvt;
-
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b0e173f2d990..3bf45672ef98 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,10 +34,19 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define I915_GTT_PAGE_SHIFT 12
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+
+#include "gt/intel_gtt.h"
+struct intel_gvt;
+struct intel_vgpu;
struct intel_vgpu_mm;
+#define I915_GTT_PAGE_SHIFT 12
+
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index cf3578e3f4dd..03c993d68f10 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -33,6 +33,10 @@
#ifndef _GVT_H_
#define _GVT_H_
+#include <uapi/linux/pci_regs.h>
+
+#include "i915_drv.h"
+
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
@@ -244,7 +248,7 @@ struct gvt_mmio_block {
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
- u8 *mmio_attribute;
+ u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
@@ -263,6 +267,8 @@ struct intel_gvt_mmio {
* logical context image
*/
#define F_SR_IN_CTX (1 << 7)
+/* Value of command write of this reg needs to be patched */
+#define F_CMD_WRITE_PATCH (1 << 8)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -329,6 +335,7 @@ struct intel_gvt {
u32 *mocs_mmio_offset_list;
u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
+ bool is_reg_whitelist_updated;
struct dentry *debugfs_root;
};
@@ -412,6 +419,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+/* ring context size, i.e. the first 0x50 dwords (320 bytes) */
+#define RING_CTX_SIZE 320
+
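The define encodes a unit conversion: the ring context is the first 0x50 = 80 dwords of the context image, and 80 dwords * 4 bytes = 320 bytes. A one-line compile-time check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define RING_CTX_SIZE 320

int main(void)
{
	/* 0x50 dwords, 4 bytes each, is exactly 320 bytes. */
	static_assert(0x50 * sizeof(uint32_t) == RING_CTX_SIZE,
		      "ring context is the first 0x50 dwords");
	return 0;
}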
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
@@ -683,6 +693,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx(
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+/**
+ * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose cmd write
+ * needs to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ */
+static inline void intel_gvt_mmio_set_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's cmd write needs
+ * to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a GPU command write to this MMIO should be patched
+ */
+static inline bool intel_gvt_mmio_is_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
+}
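These helpers are why mmio_attribute is widened from u8 to u16 earlier in the patch: F_CMD_WRITE_PATCH is bit 8, i.e. the value 256, which no longer fits in a byte. Spelled out:

#include <assert.h>
#include <stdint.h>

#define F_SR_IN_CTX       (1 << 7)	/* highest flag that fits in a u8 */
#define F_CMD_WRITE_PATCH (1 << 8)	/* the flag added by this patch */

int main(void)
{
	assert(F_SR_IN_CTX <= UINT8_MAX);	/* 128: still a u8 value */
	assert(F_CMD_WRITE_PATCH > UINT8_MAX);	/* 256: overflows a u8 */
	assert(F_CMD_WRITE_PATCH <= UINT16_MAX);/* ...but fits in a u16 */
	return 0;
}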
+
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa7e75cb3e6a..6eeaeecb7f85 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
-static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
@@ -96,7 +96,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
}
static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u8 flags, u32 size,
+ u32 offset, u16 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
{
@@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
return -ENOMEM;
info->offset = i;
- p = find_mmio_info(gvt, info->offset);
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p) {
WARN(1, "dup mmio definition offset %x\n",
info->offset);
@@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
-/**
+/*
* FixMe:
* If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
* 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
@@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_REG, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
- NULL, force_nonpriv_write);
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
+ D_BDW_PLUS, NULL, force_nonpriv_write);
MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
@@ -3626,7 +3627,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
/*
* Normal tracked MMIOs.
*/
- mmio_info = find_mmio_info(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
if (!mmio_info) {
gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
goto default_rw;
@@ -3686,14 +3687,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
}
}
-static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
- u32 offset, void *data)
+static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;
if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
- I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+ intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index fcd663811d37..287cd142629e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,7 +32,10 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
-#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+
+#include "i915_reg.h"
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9e862dc73579..7c26af39fbfc 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index afe574d6b3b5..c9589e26af93 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 3b25e7fe32f6..b6b69777af49 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,6 +36,18 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+#include <linux/types.h>
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_lrc_reg.h"
+#include "i915_reg.h"
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gvt;
+struct intel_vgpu;
+
struct engine_mmio {
enum intel_engine_id id;
i915_reg_t reg;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 6f92cde71971..550a456e936f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -33,6 +33,8 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
+#include "gvt.h"
+
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index b58860dee970..244cc7320b54 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -133,4 +133,6 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+
+#define BCS_TILE_REGISTER_VAL_OFFSET (0x43 * 4)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aed2ef6466a2..43f31c2eab14 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -37,6 +37,8 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -135,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
int i;
bool skip = false;
int ring_id = workload->engine->id;
+ int ret;
GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -161,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
- }
+ } else if (workload->engine->id == BCS0)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ BCS_TILE_REGISTER_VAL_OFFSET,
+ (void *)shadow_ring_context +
+ BCS_TILE_REGISTER_VAL_OFFSET, 4);
#undef COPY_REG
#undef COPY_REG_MASKED
+ /* don't copy the Ring Context (the first 0x50 dwords);
+ * only copy the Engine Context part from the guest
+ */
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
- sizeof(*shadow_ring_context),
+ RING_CTX_SIZE,
(void *)shadow_ring_context +
- sizeof(*shadow_ring_context),
- I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ RING_CTX_SIZE,
+ I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
@@ -237,6 +248,11 @@ read:
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ ret = intel_gvt_scan_engine_context(workload);
+ if (ret) {
+ gvt_vgpu_err("invalid cmd found in guest context pages\n");
+ return ret;
+ }
s->last_ctx[ring_id].valid = true;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 64e7a0b791c3..7c86984a842f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -36,6 +36,11 @@
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
+#include "gt/intel_engine_types.h"
+
+#include "execlist.h"
+#include "interrupt.h"
+
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index cbe5931906e0..6a16d0ca7cda 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -499,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu))
+ if (!IS_ERR(vgpu)) {
/* calculate left instance change for types */
intel_gvt_update_vgpu_types(gvt);
+ intel_gvt_update_reg_whitelist(vgpu);
+ }
mutex_unlock(&gvt->lock);
return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 9ed19b8bca60..3bc616cc1ad2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref)
GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
/* Make the cached node available for reuse with any timeline */
- if (IS_ENABLED(CONFIG_64BIT))
- ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+ ref->cache->timeline = 0; /* needs cmpxchg(u64) */
}
spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
if (cached == idx)
return it;
-#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
/*
* An unclaimed cache [.timeline=0] can only be claimed once.
*
@@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* only the winner of that race will cmpxchg return the old
* value of 0).
*/
- if (!cached && !cmpxchg(&it->timeline, 0, idx))
+ if (!cached && !cmpxchg64(&it->timeline, 0, idx))
return it;
-#endif
}
BUILD_BUG_ON(offsetof(typeof(*it), node));
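Dropping the CONFIG_64BIT guard works because cmpxchg64() is available on 32-bit architectures too; the claim-once logic itself is unchanged: only the racer that swaps 0 -> idx sees the old value 0 and wins the node. The same pattern in portable C11 atomics:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t timeline;	/* 0 == unclaimed */

/* Returns 1 only for the single caller that claims the node. */
static int claim(uint64_t idx)
{
	uint64_t expected = 0;

	return atomic_compare_exchange_strong(&timeline, &expected, idx);
}

int main(void)
{
	assert(claim(5) == 1);	/* the first claimant wins */
	assert(claim(7) == 0);	/* losers see timeline != 0 */
	assert(timeline == 5);
	return 0;
}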
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0899b665e85..ced9a96d7c34 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "gt/intel_engine.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
@@ -1142,7 +1143,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
if (IS_ERR(dst))
return dst;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 77e76b665098..88336ff4bf09 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -45,6 +45,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
+#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -209,7 +210,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
spin_unlock(&obj->vma.lock);
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->stolen)
+ if (i915_gem_object_is_stolen(obj))
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
@@ -219,145 +220,6 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", engine->name);
}
-struct file_stats {
- struct i915_address_space *vm;
- unsigned long count;
- u64 total;
- u64 active, inactive;
- u64 closed;
-};
-
-static int per_file_stats(int id, void *ptr, void *data)
-{
- struct drm_i915_gem_object *obj = ptr;
- struct file_stats *stats = data;
- struct i915_vma *vma;
-
- if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
- return 0;
-
- stats->count++;
- stats->total += obj->base.size;
-
- spin_lock(&obj->vma.lock);
- if (!stats->vm) {
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- } else {
- struct rb_node *p = obj->vma.tree.rb_node;
-
- while (p) {
- long cmp;
-
- vma = rb_entry(p, typeof(*vma), obj_node);
- cmp = i915_vma_compare(vma, stats->vm, NULL);
- if (cmp == 0) {
- if (drm_mm_node_allocated(&vma->node)) {
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- break;
- }
- if (cmp < 0)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_put(obj);
- return 0;
-}
-
-#define print_file_stats(m, name, stats) do { \
- if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
- name, \
- stats.count, \
- stats.total, \
- stats.active, \
- stats.inactive, \
- stats.closed); \
-} while (0)
-
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *i915)
-{
- struct file_stats kstats = {};
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- rcu_read_lock();
- if (ce->state)
- per_file_stats(0,
- ce->state->obj, &kstats);
- per_file_stats(0, ce->ring->vma->obj, &kstats);
- rcu_read_unlock();
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- mutex_lock(&ctx->mutex);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = {
- .vm = rcu_access_pointer(ctx->vm),
- };
- struct drm_file *file = ctx->file_priv->file;
- struct task_struct *task;
- char name[80];
-
- rcu_read_lock();
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- rcu_read_unlock();
-
- rcu_read_lock();
- task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s",
- task ? task->comm : "<unknown>");
- rcu_read_unlock();
-
- print_file_stats(m, name, stats);
- }
- mutex_unlock(&ctx->mutex);
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- print_file_stats(m, "[k]contexts", kstats);
-}
-
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -371,311 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
for_each_memory_region(mr, i915, id)
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
mr->name, &mr->total, &mr->avail);
- seq_putc(m, '\n');
-
- print_context_stats(m, i915);
-
- return 0;
-}
-
-static void gen8_display_interrupt_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!wakeref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
-}
-
-static int i915_interrupt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- int i, pipe;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
- } else if (INTEL_GEN(dev_priv) >= 11) {
- if (HAS_MASTER_UNIT_IRQ(dev_priv))
- seq_printf(m, "Master Unit Interrupt Control: %08x\n",
- I915_READ(DG1_MSTR_UNIT_INTR));
-
- seq_printf(m, "Master Interrupt Control: %08x\n",
- I915_READ(GEN11_GFX_MSTR_IRQ));
-
- seq_printf(m, "Render/Copy Intr Enable: %08x\n",
- I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
- seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
- I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
- seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_ENABLE));
- seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
- seq_printf(m, "Crypto Intr Enable:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
- seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
-
- seq_printf(m, "Display Interrupt Control:\t%08x\n",
- I915_READ(GEN11_DISPLAY_INT_CTL));
-
- gen8_display_interrupt_info(m);
- } else if (INTEL_GEN(dev_priv) >= 8) {
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- gen8_display_interrupt_info(m);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- seq_printf(m, "Master IER:\t%08x\n",
- I915_READ(VLV_MASTER_IER));
-
- seq_printf(m, "Render IER:\t%08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Render IIR:\t%08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Render IMR:\t%08x\n",
- I915_READ(GTIMR));
-
- seq_printf(m, "PM IER:\t\t%08x\n",
- I915_READ(GEN6_PMIER));
- seq_printf(m, "PM IIR:\t\t%08x\n",
- I915_READ(GEN6_PMIIR));
- seq_printf(m, "PM IMR:\t\t%08x\n",
- I915_READ(GEN6_PMIMR));
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- } else if (!HAS_PCH_SPLIT(dev_priv)) {
- seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(GEN2_IER));
- seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(GEN2_IIR));
- seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(GEN2_IMR));
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "Pipe %c stat: %08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- } else {
- seq_printf(m, "North Display Interrupt enable: %08x\n",
- I915_READ(DEIER));
- seq_printf(m, "North Display Interrupt identity: %08x\n",
- I915_READ(DEIIR));
- seq_printf(m, "North Display Interrupt mask: %08x\n",
- I915_READ(DEIMR));
- seq_printf(m, "South Display Interrupt enable: %08x\n",
- I915_READ(SDEIER));
- seq_printf(m, "South Display Interrupt identity: %08x\n",
- I915_READ(SDEIIR));
- seq_printf(m, "South Display Interrupt mask: %08x\n",
- I915_READ(SDEIMR));
- seq_printf(m, "Graphics Interrupt enable: %08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Graphics Interrupt identity: %08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Graphics Interrupt mask: %08x\n",
- I915_READ(GTIMR));
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- seq_printf(m, "RCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
- seq_printf(m, "BCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_BCS_RSVD_INTR_MASK));
- seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
- seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
- seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
- seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_MASK));
- seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
- seq_printf(m, "Crypto Intr Mask:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
- seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
-
- } else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_uabi_engine(engine, dev_priv) {
- seq_printf(m,
- "Graphics Interrupt mask (%s): %08x\n",
- engine->name, ENGINE_READ(engine, RING_IMR));
- }
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- unsigned int i;
-
- seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
-
- rcu_read_lock();
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- struct i915_vma *vma = reg->vma;
-
- seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, atomic_read(&reg->pin_count));
- if (!vma)
- seq_puts(m, "unused");
- else
- i915_debugfs_describe_obj(m, vma->obj);
- seq_putc(m, '\n');
- }
- rcu_read_unlock();
return 0;
}
@@ -802,7 +359,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -847,19 +404,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
- rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
} else {
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
}
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
@@ -871,24 +428,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
reqf = intel_gpu_freq(rps, reqf);
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
- rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
- rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
- pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
* The equivalent to the PM ISR & IIR cannot be read
* without affecting the current state of the system
@@ -896,17 +453,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_isr = 0;
pm_iir = 0;
} else if (INTEL_GEN(dev_priv) >= 8) {
- pm_ier = I915_READ(GEN8_GT_IER(2));
- pm_imr = I915_READ(GEN8_GT_IMR(2));
- pm_isr = I915_READ(GEN8_GT_ISR(2));
- pm_iir = I915_READ(GEN8_GT_IIR(2));
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
} else {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
}
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -936,27 +493,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpupei,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dun)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldun)\n",
rpcurup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpdownei,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpprevdown));
@@ -1011,111 +568,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
- unsigned int max_gpu_freq, min_gpu_freq;
- intel_wakeref_t wakeref;
- int gpu_freq, ia_freq;
-
- if (!HAS_LLC(dev_priv))
- return -ENODEV;
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 HZ units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
- ia_freq = gpu_freq;
- sandybridge_pcode_read(dev_priv,
- GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq, NULL);
- seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(rps,
- (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
- ((ia_freq >> 0) & 0xff) * 100,
- ((ia_freq >> 8) & 0xff) * 100);
- }
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
-{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
- ring->space, ring->head, ring->tail, ring->emit);
-}
-
-static int i915_context_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- seq_puts(m, "HW context ");
- if (ctx->pid) {
- struct task_struct *task;
-
- task = get_pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- seq_printf(m, "(%s [%d]) ",
- task->comm, task->pid);
- put_task_struct(task);
- }
- } else if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else {
- seq_puts(m, "(kernel) ");
- }
-
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, '\n');
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- i915_debugfs_describe_obj(m, ce->state->obj);
- describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- seq_putc(m, '\n');
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -1193,20 +645,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
-static const char *rps_power_to_str(unsigned int power)
-{
- static const char * const strings[] = {
- [LOW_POWER] = "low power",
- [BETWEEN] = "mixed",
- [HIGH_POWER] = "high power",
- };
-
- if (power >= ARRAY_SIZE(strings) || !strings[power])
- return "unknown";
-
- return strings[power];
-}
-
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1231,42 +669,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
-
- if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
- u32 rpup, rpupei;
- u32 rpdown, rpdownei;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
- rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
- rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
- rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power.mode));
- seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
- rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->power.up_threshold);
- seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
- rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->power.down_threshold);
- } else {
- seq_puts(m, "\nRPS Autotuning inactive\n");
- }
-
- return 0;
-}
-
-static int i915_llc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const bool edram = INTEL_GEN(dev_priv) > 8;
-
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
- dev_priv->edram_size_mb);
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
return 0;
}
@@ -1280,7 +683,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- enableddisabled(!dev_priv->power_domains.wakeref));
+ enableddisabled(!dev_priv->power_domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
@@ -1306,34 +709,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- seq_printf(m, "GT awake? %s [%d]\n",
- yesno(dev_priv->gt.awake),
- atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u Hz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
+ seq_printf(m, "GT awake? %s [%d], %llums\n",
+ yesno(i915->gt.awake),
+ atomic_read(&i915->gt.wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ i915->gt.clock_frequency,
+ i915->gt.clock_period_ns);
p = drm_seq_file_printer(m);
- for_each_uabi_engine(engine, dev_priv)
+ for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
- return 0;
-}
-
-static int i915_shrinker_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
-
- seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
- seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -1411,7 +808,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1529,55 +926,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
i915_drop_caches_get, i915_drop_caches_set,
"0x%08llx\n");
-static int
-i915_cache_sharing_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- u32 snpcr = 0;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
-
- return 0;
-}
-
-static int
-i915_cache_sharing_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- if (val > 3)
- return -EINVAL;
-
- drm_dbg(&dev_priv->drm,
- "Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 snpcr;
-
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
- i915_cache_sharing_get, i915_cache_sharing_set,
- "%llu\n");
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1621,16 +969,10 @@ static const struct file_operations i915_forcewake_fops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
- {"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_context_status", i915_context_status, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
@@ -1643,7 +985,6 @@ static const struct i915_debugfs_files {
} i915_debugfs_files[] = {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
- {"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 99eb0d7bbc44..2febda554bca 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,6 +58,7 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -410,6 +411,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
@@ -516,8 +518,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
-
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
@@ -609,14 +609,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
+
+ intel_pcode_init(dev_priv);
+
/*
- * Fill the dram structure to get the system raw bandwidth and
- * dram info. This will be used for memory latency calculation.
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
*/
intel_dram_detect(dev_priv);
- intel_pcode_init(dev_priv);
-
intel_bw_init_hw(dev_priv);
return 0;
@@ -733,6 +734,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
* events.
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
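+	/* Disable all displays while unregistering, before GT teardown. */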
+ drm_atomic_helper_shutdown(&dev_priv->drm);
intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
@@ -935,8 +937,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(&i915->drm);
-
intel_gvt_driver_remove(i915);
intel_modeset_driver_remove(i915);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c6964f82a1bb..781e232b501e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,9 +79,9 @@
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
+#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
@@ -103,7 +103,6 @@
#include "i915_vma.h"
#include "i915_irq.h"
-#include "intel_region_lmem.h"
/* General customization:
*/
@@ -416,6 +415,7 @@ struct intel_fbc {
u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
+ bool psr2_active;
} state_cache;
/*
@@ -1122,23 +1122,11 @@ struct drm_i915_private {
* crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
-
- /*
- * Set during HW readout of watermarks/DDB. Some platforms
- * need to know when we're still using BIOS-provided values
- * (which we don't fully trust).
- *
- * FIXME get rid of this.
- */
- bool distrust_bios_wm;
} wm;
struct dram_info {
- bool valid;
- bool is_16gb_dimm;
+ bool wm_lv_0_adjust_needed;
u8 num_channels;
- u8 ranks;
- u32 bandwidth_kbps;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
@@ -1147,6 +1135,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
} type;
+ u8 num_qgv_points;
} dram_info;
struct intel_bw_info {
@@ -1169,9 +1158,6 @@ struct drm_i915_private {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
-
- struct llist_head free_list;
- struct work_struct free_work;
} contexts;
/*
@@ -1185,6 +1171,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ u8 framestart_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1569,16 +1557,30 @@ enum {
TGL_REVID_D0,
};
-extern const struct i915_rev_steppings tgl_uy_revids[];
-extern const struct i915_rev_steppings tgl_revids[];
+#define TGL_UY_REVIDS_SIZE 4
+#define TGL_REVIDS_SIZE 2
+
+extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return &tgl_uy_revids[INTEL_REVID(dev_priv)];
- else
- return &tgl_revids[INTEL_REVID(dev_priv)];
+ u8 revid = INTEL_REVID(dev_priv);
+ u8 size;
+ const struct i915_rev_steppings *tgl_revid_tbl;
+
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ tgl_revid_tbl = tgl_uy_revids;
+ size = ARRAY_SIZE(tgl_uy_revids);
+ } else {
+ tgl_revid_tbl = tgl_revids;
+ size = ARRAY_SIZE(tgl_revids);
+ }
+
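+	/* Clamp so a newer, unknown revid maps to the last known stepping. */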
+ revid = min_t(u8, revid, size - 1);
+
+ return &tgl_revid_tbl[revid];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1588,14 +1590,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1646,8 +1648,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
@@ -1749,10 +1749,17 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
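+/* True when running under a hypervisor rather than on bare metal. */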
+static inline bool run_as_guest(void)
+{
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+}
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -1761,7 +1768,7 @@ static inline bool intel_vtd_active(void)
#endif
/* Running as a guest, we assume the host is enforcing VT-d */
- return !hypervisor_is_type(X86_HYPER_NATIVE);
+ return run_as_guest();
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1967,43 +1974,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-#define __I915_REG_OP(op__, dev_priv__, ...) \
- intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-
-#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-
-#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-
-/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections, such as inside IRQ handlers, where forcewake is explicitly
- * controlled.
- *
- * Think twice, and think again, before using these.
- *
- * As an example, these accessors can possibly be used between:
- *
- * spin_lock_irq(&dev_priv->uncore.lock);
- * intel_uncore_forcewake_get__locked();
- *
- * and
- *
- * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
- *
- *
- * Note: some registers may not need forcewake held, so
- * intel_uncore_forcewake_{get,put} can be omitted, see
- * intel_uncore_forcewake_for_reg().
- *
- * Certain architectures will die if the same cacheline is concurrently accessed
- * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
- * a more localised lock guarding all access to that bank of registers.
- */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2026,16 +1996,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
-{
- return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
- 1000000000);
-}
-
-static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
-{
- return div_u64(val * 1000000000,
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58276694c848..9b04dff5eb32 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,108 +180,6 @@ try_again:
}
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
-{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
- int ret;
-
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
- if (size == 0)
- return -EINVAL;
-
- /* For most of the ABI (e.g. mmap) we think in system pages */
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- *handle_p = handle;
- *size_p = size;
- return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- enum intel_memory_type mem_type;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- u32 format;
-
- switch (cpp) {
- case 1:
- format = DRM_FORMAT_C8;
- break;
- case 2:
- format = DRM_FORMAT_RGB565;
- break;
- case 4:
- format = DRM_FORMAT_XRGB8888;
- break;
- default:
- return -EINVAL;
- }
-
- /* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * cpp, 64);
-
- /* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
- DRM_FORMAT_MOD_LINEAR))
- args->pitch = ALIGN(args->pitch, 4096);
-
- if (args->pitch < args->width)
- return -EINVAL;
-
- args->size = mul_u32_u32(args->pitch, args->height);
-
- mem_type = INTEL_MEMORY_SYSTEM;
- if (HAS_LMEM(to_i915(dev)))
- mem_type = INTEL_MEMORY_LOCAL;
-
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_create *args = data;
-
- i915_gem_flush_free_objects(i915);
-
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
-}
-
-static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
@@ -1059,14 +957,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
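+			/* Dropping the tiling quirk makes the pages shrinkable again. */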
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
@@ -1207,8 +1105,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- i915_gem_driver_release__contexts(dev_priv);
-
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1279,19 +1175,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
* the objects as well, see i915_gem_freeze()
*/
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- i915_gem_shrink(i915, -1UL, NULL, ~0);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_cpu_domain(obj, true));
- i915_gem_object_unlock(obj);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
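+	/* Flush every CPU cache once rather than clflushing each object individually. */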
+ wbinvd_on_all_cpus();
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+ __start_cpu_write(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a4cad3f154ca..e622aee6e4be 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -38,11 +38,18 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
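+/* With CONFIG_DRM_I915_DEBUG_GEM_ONCE a failed GEM_BUG_ON still BUG()s; otherwise it degrades to a WARN. */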
+#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
+#define __GEM_BUG(cond) BUG()
+#else
+#define __GEM_BUG(cond) \
+ WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond))
+#endif
+
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
GEM_TRACE_DUMP(); \
- BUG(); \
+ __GEM_BUG(condition); \
} \
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8245b8..4d2d59a9942b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
return drm_mm_scan_add_block(scan, &vma->node);
}
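+/* Active and scanout vmas are expensive to evict; scan them only as a last resort. */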
+static bool defer_evict(struct i915_vma *vma)
+{
+ if (i915_vma_is_active(vma))
+ return true;
+
+ if (i915_vma_is_scanout(vma))
+ return true;
+
+ return false;
+}
+
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
@@ -150,7 +161,7 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
if (!active)
active = vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5ee1567f3d1..3ee2f682eff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct device *kdev = &dev_priv->drm.pdev->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (unlikely(ggtt->do_idle_maps)) {
- /* XXX This does not prevent more requests being submitted! */
- if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
- -MAX_SCHEDULE_TIMEOUT)) {
- drm_err(&dev_priv->drm,
- "Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ /* XXX This does not prevent more requests being submitted! */
+ if (unlikely(ggtt->do_idle_maps))
+ /* Wait a bit, in the hope it avoids the hang */
+ usleep_range(100, 250);
- dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&i915->drm.pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index f96032c60a12..75c3bfc2486e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
+ value = i915->gt.clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881f..f962693404b7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+ const u32 period = m->i915->gt.clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -1051,7 +1051,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma->pages) {
void __iomem *s;
- s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
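+		/* The iomap is indexed by offset from the region start, not by DMA address. */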
+ s = io_mapping_map_wc(&mem->iomap,
+ dma - mem->region.start,
+ PAGE_SIZE);
ret = compress_page(compress,
(void __force *)s, dst,
true);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6cdb052e3850..1a701367a718 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -327,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, bits & ~mask);
- val = I915_READ(PORT_HOTPLUG_EN);
+ val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
- I915_WRITE(PORT_HOTPLUG_EN, val);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
/**
@@ -376,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- POSTING_READ(DEIMR);
+ intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
}
}
@@ -401,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = I915_READ(GEN8_DE_PORT_IMR);
+ old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- I915_WRITE(GEN8_DE_PORT_IMR, new_val);
- POSTING_READ(GEN8_DE_PORT_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
}
}
@@ -440,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -455,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -466,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- I915_WRITE(SDEIMR, sdeimr);
- POSTING_READ(SDEIMR);
+ intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
+ intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
@@ -533,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
@@ -556,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
@@ -715,28 +715,18 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
if (!vblank->max_vblank_count)
return 0;
- return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+ return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_vblank_crtc *vblank =
&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
u32 htotal = mode->crtc_htotal;
u32 clock = mode->crtc_clock;
- u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
/*
* To avoid the race condition where we might cross into the
@@ -763,8 +753,28 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
- scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, the pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT, or there
+ * are issues with scanline register updates.
+ * This function will use the Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
scanline = min(scanline, vtotal - 1);
scanline = (scanline + vblank_start) % vtotal;
@@ -883,7 +893,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
if (stime)
*stime = ktime_get();
- if (use_scanline_counter) {
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
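+		/* With VRR the vblank length varies; use the frame timestamp to refine the reported position. */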
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already apporaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1004,9 +1027,9 @@ static void ivb_parity_work(struct work_struct *work)
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
i915_reg_t reg;
@@ -1020,13 +1043,13 @@ static void ivb_parity_work(struct work_struct *work)
reg = GEN7_L3CDERRST1(slice);
- error_status = I915_READ(reg);
+ error_status = intel_uncore_read(&dev_priv->uncore, reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
- I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
@@ -1047,7 +1070,7 @@ static void ivb_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
@@ -1062,17 +1085,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
default:
return false;
}
@@ -1096,13 +1114,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
case HPD_PORT_B:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
case HPD_PORT_C:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
case HPD_PORT_D:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1112,17 +1127,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
+ return val & ICP_TC_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1337,7 +1347,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
@@ -1345,11 +1355,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -1358,19 +1368,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
- res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_RED(pipe)),
- I915_READ(PIPE_CRC_RES_GREEN(pipe)),
- I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
res1, res2);
}
@@ -1379,7 +1389,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
+ intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
@@ -1433,7 +1443,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
@@ -1446,8 +1456,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- I915_WRITE(reg, pipe_stats[pipe]);
- I915_WRITE(reg, enable_mask);
+ intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -1530,6 +1540,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
intel_handle_vblank(dev_priv, pipe);
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ flip_done_handler(dev_priv, pipe);
+
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1563,18 +1576,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+ u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
}
drm_WARN_ONCE(&dev_priv->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1623,9 +1636,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
- iir = I915_READ(VLV_IIR);
+ gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
break;
@@ -1645,14 +1658,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
* bits this time around.
*/
- I915_WRITE(VLV_MASTER_IER, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
+ intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1670,10 +1683,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
@@ -1710,8 +1723,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
- iir = I915_READ(VLV_IIR);
+ master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (master_ctl == 0 && iir == 0)
break;
@@ -1731,9 +1744,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
* bits this time around.
*/
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
@@ -1754,10 +1767,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1783,7 +1796,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -1792,7 +1805,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg &= ~mask;
}
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
@@ -1837,7 +1850,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -1856,7 +1869,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
@@ -1874,12 +1887,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
}
}
- I915_WRITE(GEN7_ERR_INT, err_int);
+ intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -1889,7 +1902,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
- I915_WRITE(SERR_INT, serr_int);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -1922,7 +1935,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
@@ -1938,8 +1951,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1950,8 +1963,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
@@ -1976,8 +1989,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -1988,8 +2001,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
- I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
@@ -2009,8 +2022,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2042,6 +2055,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_PIPE_VBLANK(pipe))
intel_handle_vblank(dev_priv, pipe);
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ flip_done_handler(dev_priv, pipe);
+
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2051,7 +2067,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -2059,7 +2075,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
@@ -2079,10 +2095,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
+ intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2092,18 +2108,21 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
intel_handle_vblank(dev_priv, pipe);
+
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ flip_done_handler(dev_priv, pipe);
}
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
}
@@ -2190,8 +2209,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2210,8 +2229,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
@@ -2222,8 +2241,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
@@ -2300,8 +2319,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = I915_READ(iir_reg);
- I915_WRITE(iir_reg, psr_iir);
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
if (psr_iir)
found = true;
@@ -2325,7 +2344,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* In case of dual link, TE comes from DSI_1;
* this is to check if dual link is enabled
*/
- val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -2337,7 +2356,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -2346,7 +2365,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -2366,8 +2385,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+}
+
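+/* Pick the primary-plane flip-done bit for this platform's DE pipe IIR. */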
+static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 9)
+ return GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
static irqreturn_t
@@ -2378,9 +2405,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
enum pipe pipe;
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = I915_READ(GEN8_DE_MISC_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
- I915_WRITE(GEN8_DE_MISC_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
@@ -2390,9 +2417,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = I915_READ(GEN11_DE_HPD_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
if (iir) {
- I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
@@ -2402,11 +2429,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = I915_READ(GEN8_DE_PORT_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- I915_WRITE(GEN8_DE_PORT_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
if (iir & gen8_de_port_aux_mask(dev_priv)) {
@@ -2459,7 +2486,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
@@ -2467,12 +2494,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
- if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
+ if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
@@ -2496,9 +2523,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- iir = I915_READ(SDEIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (iir) {
- I915_WRITE(SDEIIR, iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
ret = IRQ_HANDLED;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -2741,7 +2768,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* only when vblank interrupts are actually enabled.
*/
if (dev_priv->vblank_enabled++ == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -2798,16 +2825,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- tmp = I915_READ(DSI_INTR_MASK_REG(port));
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
if (enable)
tmp &= ~DSI_TE_EVENT;
else
tmp |= DSI_TE_EVENT;
- I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
return true;
}
@@ -2835,19 +2862,6 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-void skl_enable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2869,7 +2883,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc)
i8xx_disable_vblank(crtc);
if (--dev_priv->vblank_enabled == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -2912,19 +2926,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void skl_disable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2935,7 +2936,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- I915_WRITE(SERR_INT, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2948,7 +2949,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3011,8 +3012,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(&dev_priv->gt);
@@ -3117,13 +3118,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9)
- extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
-
spin_lock_irq(&dev_priv->irq_lock);
if (!intel_irqs_enabled(dev_priv)) {
@@ -3165,8 +3163,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
gen8_gt_irq_reset(&dev_priv->gt);
@@ -3212,7 +3210,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3221,7 +3219,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
PORTC_PULSE_DURATION_MASK |
PORTD_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3270,20 +3268,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
@@ -3291,7 +3289,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3302,7 +3300,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3330,12 +3328,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val |= (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
icp_hpd_irq_setup(dev_priv);
}
@@ -3344,7 +3342,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3352,14 +3350,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3367,7 +3365,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3378,11 +3376,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
- val = I915_READ(GEN11_DE_HPD_IMR);
+ val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
val |= ~enabled_irqs & hotplug_irqs;
- I915_WRITE(GEN11_DE_HPD_IMR, val);
- POSTING_READ(GEN11_DE_HPD_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
gen11_tc_hpd_detection_setup(dev_priv);
gen11_tbt_hpd_detection_setup(dev_priv);
@@ -3425,25 +3423,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
val |= CHASSIS_CLK_REQ_DURATION(0xf);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
}
/* Enable digital hotplug on the PCH */
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
PORTD_HOTPLUG_ENABLE);
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
- hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
hotplug &= ~PORTE_HOTPLUG_ENABLE;
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
- I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3451,7 +3449,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
@@ -3482,11 +3480,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
 * duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on HSW+.
*/
- hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3536,7 +3534,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3544,7 +3542,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
BXT_DDIB_HPD_INVERT |
BXT_DDIC_HPD_INVERT);
hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3598,6 +3596,9 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -3605,6 +3606,8 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
DE_DP_A_HOTPLUG);
}
@@ -3664,8 +3667,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3695,11 +3698,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_port_masked |= DSI0_TE | DSI1_TE;
}
- de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
-
- if (INTEL_GEN(dev_priv) >= 9)
- de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
+ de_pipe_enables = de_pipe_masked |
+ GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
if (IS_GEN9_LP(dev_priv))
@@ -3778,14 +3779,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
- POSTING_READ(DG1_MSTR_UNIT_INTR);
+ intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
} else {
gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
}
@@ -3798,8 +3799,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
@@ -3889,11 +3890,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = I915_READ(EIR);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
- I915_WRITE(EIR, *eir);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
- *eir_stuck = I915_READ(EIR);
+ *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -3907,9 +3908,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ(EMR);
- I915_WRITE(EMR, 0xffffffff);
- I915_WRITE(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -3975,7 +3976,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
}
i9xx_pipestat_irq_reset(dev_priv);
@@ -3989,7 +3990,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
@@ -4042,7 +4043,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4059,7 +4060,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
@@ -4085,7 +4086,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -4112,7 +4113,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
- I915_WRITE(EMR, error_mask);
+ intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4188,7 +4189,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4204,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
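The register-access conversion in this file is mechanical; a minimal before/after sketch of the pattern, using names taken from the hunks above (surrounding context assumed):

	/* Before: I915_READ/I915_WRITE pick up dev_priv implicitly. */
	iir = I915_READ(GEN8_DE_MISC_IIR);
	if (iir)
		I915_WRITE(GEN8_DE_MISC_IIR, iir);

	/* After: the intel_uncore instance is named explicitly. */
	iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
	if (iir)
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);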
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 2efe609519ca..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -118,9 +118,6 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void skl_enable_flip_done(struct intel_crtc *crtc);
-void skl_disable_flip_done(struct intel_crtc *crtc);
-
void gen2_irq_reset(struct intel_uncore *uncore);
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 43039dc8c607..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -62,7 +62,7 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
- if (GEM_WARN_ON(!r->sgt.pfn))
+ if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f139ea4a90b..6939634e56ed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -185,7 +185,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 330c03e2b4f7..f031966af5b7 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -32,6 +32,7 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+#define ENABLE_GUC_MASK GENMASK(1, 0)
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 11fe790b1969..020b5f561f07 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -455,6 +455,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -513,6 +514,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_reset_engine = true,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
@@ -571,8 +573,7 @@ static const struct intel_device_info hsw_gt3_info = {
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
- .has_64bit_reloc = 1, \
- .has_reset_engine = 1
+ .has_64bit_reloc = 1
#define BDW_PLATFORM \
GEN8_FEATURES, \
@@ -639,7 +640,6 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
@@ -700,7 +700,6 @@ static const struct intel_device_info skl_gt4_info = {
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
- .has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 649c26518d26..112ba5f2ce90 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -198,8 +198,11 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -3516,7 +3520,8 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
+ return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ 2ULL << exponent);
}
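A quick worked example of the new units (the clock value is invented for illustration): with a 19.2 MHz GT clock, exponent 5 selects 2 << 5 = 64 clock ticks between OA reports, i.e. roughly 64 / 19.2e6 s ≈ 3333 ns.

	/* Illustrative only: 19.2 MHz GT clock, OA exponent 5. */
	u64 period_ns = oa_exponent_to_ns(perf, 5); /* ~3333 ns */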
/**
@@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit =
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
+ /* Choose a representative limit */
+ oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
mutex_init(&perf->metrics_lock);
- idr_init(&perf->metrics_idr);
+ idr_init_base(&perf->metrics_idr, 1);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 9856479b56d8..2b88c0baa1bf 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -26,8 +26,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
-
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
@@ -56,17 +54,42 @@ static bool is_engine_config(u64 config)
return config < __I915_PMU_OTHER(0);
}
-static unsigned int config_enabled_bit(u64 config)
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+ * Events that do not require sampling, or tracking state
+ * transitions between enabled and disabled, can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT + val;
+}
+
+static unsigned int config_bit(const u64 config)
{
if (is_engine_config(config))
return engine_config_sample(config);
else
- return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+ return other_bit(config);
}
-static u64 config_enabled_mask(u64 config)
+static u64 config_mask(u64 config)
{
- return BIT_ULL(config_enabled_bit(config));
+ return BIT_ULL(config_bit(config));
}
static bool is_engine_event(struct perf_event *event)
@@ -74,15 +97,15 @@ static bool is_engine_event(struct perf_event *event)
return is_engine_config(event->attr.config);
}
-static unsigned int event_enabled_bit(struct perf_event *event)
+static unsigned int event_bit(struct perf_event *event)
{
- return config_enabled_bit(event->attr.config);
+ return config_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- u64 enable;
+ u32 enable;
/*
* Only some counters need the sampling timer.
@@ -95,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY) |
ENGINE_SAMPLE_MASK;
/*
@@ -137,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt)
return val;
}
-#if IS_ENABLED(CONFIG_PM)
-
-static inline s64 ktime_since(const ktime_t kt)
+static inline s64 ktime_since_raw(const ktime_t kt)
{
- return ktime_to_ns(ktime_sub(ktime_get(), kt));
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
static u64 get_rc6(struct intel_gt *gt)
@@ -170,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since(pmu->sleep_last);
+ val = ktime_since_raw(pmu->sleep_last);
val += pmu->sample[__I915_SAMPLE_RC6].cur;
}
@@ -193,7 +214,7 @@ static void init_rc6(struct i915_pmu *pmu)
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
pmu->sample[__I915_SAMPLE_RC6].cur;
- pmu->sleep_last = ktime_get();
+ pmu->sleep_last = ktime_get_raw();
}
}
@@ -202,21 +223,9 @@ static void park_rc6(struct drm_i915_private *i915)
struct i915_pmu *pmu = &i915->pmu;
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
-}
-
-#else
-
-static u64 get_rc6(struct intel_gt *gt)
-{
- return __get_rc6(gt);
+ pmu->sleep_last = ktime_get_raw();
}
-static void init_rc6(struct i915_pmu *pmu) { }
-static void park_rc6(struct drm_i915_private *i915) {}
-
-#endif
-
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
@@ -355,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
return pmu->enable &
- (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
@@ -374,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
/*
@@ -396,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, val), period_ns / 1000);
}
- if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
@@ -483,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config)
if (!HAS_RC6(i915))
return -ENODEV;
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
default:
return -ENOENT;
}
@@ -590,6 +601,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ break;
}
}
@@ -622,9 +636,13 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ unsigned int bit;
+
+ bit = event_bit(event);
+ if (bit == -1)
+ goto update;
spin_lock_irqsave(&pmu->lock, flags);
@@ -670,6 +688,7 @@ static void i915_pmu_enable(struct perf_event *event)
spin_unlock_irqrestore(&pmu->lock, flags);
+update:
/*
* Store the current counter value so we can report the correct delta
* for all listeners. Even when the event was already enabled and has
@@ -682,10 +701,13 @@ static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
+ unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ if (bit == -1)
+ return;
+
spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
@@ -882,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu)
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+ __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
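The switch to ktime_get_raw() keeps the parked-RC6 estimate on a clock immune to NTP adjustment. A minimal sketch of the estimation step (a paraphrase of get_rc6() above, not new code in the patch):

	/* Sketch only: approximate RC6 while the GT is parked. */
	static u64 estimate_rc6_ns(const struct i915_pmu *pmu)
	{
		u64 asleep = ktime_to_ns(ktime_sub(ktime_get_raw(),
						   pmu->sleep_last));

		/* Time asleep accrues on top of the last real reading. */
		return pmu->sample[__I915_SAMPLE_RC6].cur + asleep;
	}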
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8405d6da5b9a..60f9595f902c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -14,6 +14,21 @@
struct drm_i915_private;
+/**
+ * Non-engine events for which we need to track the enabled-disabled
+ * transition and current state.
+ */
+enum i915_pmu_tracked_events {
+ __I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0,
+ __I915_PMU_REQUESTED_FREQUENCY_ENABLED,
+ __I915_PMU_RC6_RESIDENCY_ENABLED,
+ __I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
+};
+
+/**
+ * Slots used by the sampling timer (non-engine events) with some extras for
+ * convenience.
+ */
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
@@ -28,8 +43,7 @@ enum {
 * It is also used to know the needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
- ((1 << I915_PMU_SAMPLE_BITS) + \
- (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+ (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
@@ -66,18 +80,17 @@ struct i915_pmu {
*/
struct hrtimer timer;
/**
- * @enable: Bitmask of all currently enabled events.
+ * @enable: Bitmask of specific enabled events.
+ *
+ * For some events we need to track their state and do some internal
+ * housekeeping.
*
- * Bits are derived from uAPI event numbers in a way that low 16 bits
- * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
- * bit 0), and higher bits correspond to other events (for instance
- * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ * Each engine event sampler type and event listed in enum
+ * i915_pmu_tracked_events gets a bit in this field.
*
- * In other words, low 16 bits are not per engine but per engine
- * sampler type, while the upper bits are directly mapped to other
- * event types.
+ * Low bits are engine samplers and other events continue from there.
*/
- u64 enable;
+ u32 enable;
/**
* @timer_last:
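To make the new packing concrete, a sketch of the resulting bit layout (assuming the three uAPI engine samplers busy/wait/sema, so I915_ENGINE_SAMPLE_COUNT == 3):

	/*
	 * bit 0  I915_SAMPLE_BUSY
	 * bit 1  I915_SAMPLE_WAIT
	 * bit 2  I915_SAMPLE_SEMA
	 * bit 3  __I915_PMU_ACTUAL_FREQUENCY_ENABLED
	 * bit 4  __I915_PMU_REQUESTED_FREQUENCY_ENABLED
	 * bit 5  __I915_PMU_RC6_RESIDENCY_ENABLED
	 *
	 * e.g. config_bit(I915_PMU_RC6_RESIDENCY) == 3 + 2 == 5, and
	 * config_mask(I915_PMU_RC6_RESIDENCY) == BIT_ULL(5).
	 */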
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5375b219cc3b..598abd2f5b5f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
-#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
-#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
+#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
/* Make render/texture TLB fetches lower priority than associated data
@@ -4347,12 +4346,13 @@ enum {
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -6578,6 +6578,7 @@ enum {
#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
+#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
@@ -6614,6 +6615,7 @@ enum {
#define DISPPLANE_ROTATE_180 (1 << 15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_ASYNC_FLIP (1 << 9) /* g4x+ */
#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
@@ -6625,6 +6627,7 @@ enum {
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
@@ -7070,6 +7073,8 @@ enum {
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define _PLANE_CC_VAL_1_A 0x701b4
+#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
@@ -7111,6 +7116,13 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
+#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
+#define PLANE_CC_VAL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
@@ -9872,6 +9884,7 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
+
#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
#define _TRANSA_HDCP2_AUTH 0x66498
#define _TRANSB_HDCP2_AUTH 0x66598
@@ -9911,6 +9924,44 @@ enum skl_power_gate {
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS BIT(31)
+#define STREAM_TYPE_STATUS BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, pipe) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -9960,6 +10011,7 @@ enum skl_power_gate {
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
#define TRANS_DDI_BFI_ENABLE (1 << 4)
#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
@@ -10851,8 +10903,10 @@ enum skl_power_gate {
#define CNL_DRAM_RANK_3 (0x2 << 9)
#define CNL_DRAM_RANK_4 (0x3 << 9)
-/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using I915_WRITE. */
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
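A hedged usage sketch of the renamed VRR field helpers (the depth value and the flag combination are invented for illustration):

	/* Illustrative only: override the pipeline-full depth to 8 lines. */
	u32 ctl = VRR_CTL_VRR_ENABLE | VRR_CTL_FLIP_LINE_EN |
		  VRR_CTL_PIPELINE_FULL(8) |
		  VRR_CTL_PIPELINE_FULL_OVERRIDE;

	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), ctl);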
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5385b081a376..22e39d938f17 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -275,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
bool i915_request_retire(struct i915_request *rq)
{
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
return false;
RQ_TRACE(rq, "\n");
@@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
}
- if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
atomic_dec(&rq->engine->gt->rps.num_waiters);
- }
/*
* We only loosely track inflight requests across preemption,
@@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq)
* after removing the breadcrumb and signaling it, so that we do not
* inadvertently attach the breadcrumb to a completed request.
*/
- remove_from_engine(rq);
+ if (!list_empty(&rq->sched.link))
+ remove_from_engine(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct i915_request *tmp;
RQ_TRACE(rq, "\n");
-
- GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
do {
tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
@@ -488,6 +487,8 @@ void __i915_request_skip(struct i915_request *rq)
if (rq->infix == rq->postfix)
return;
+ RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -513,6 +514,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
} while (!try_cmpxchg(&rq->fence.error, &old, error));
}
+void i915_request_mark_eio(struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+}
+
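A hypothetical caller of the new helper (the loop is illustrative; the real callers live in the submission backends):

	/* Sketch: flush a wedged engine's remaining requests with -EIO. */
	list_for_each_entry(rq, &engine->active.requests, sched.link)
		i915_request_mark_eio(rq);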
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -539,12 +551,10 @@ bool __i915_request_submit(struct i915_request *request)
* dropped upon retiring. (Otherwise if resubmit a *retired*
* request, this would be a horrible use-after-free.)
*/
- if (i915_request_completed(request))
- goto xfer;
-
- if (unlikely(intel_context_is_closed(request->context) &&
- !intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(request->context);
+ if (__i915_request_is_complete(request)) {
+ list_del_init(&request->sched.link);
+ goto active;
+ }
if (unlikely(intel_context_is_banned(request->context)))
i915_request_set_error_once(request, -EIO);
@@ -579,11 +589,11 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer:
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
- list_move_tail(&request->sched.link, &engine->active.requests);
- clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
- }
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ list_move_tail(&request->sched.link, &engine->active.requests);
+active:
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
/*
* XXX Rollback bonded-execution on __i915_request_unsubmit()?
@@ -643,7 +653,7 @@ void __i915_request_unsubmit(struct i915_request *request)
i915_request_cancel_breadcrumb(request);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request))
+ if (request->sched.semaphores && __i915_request_has_started(request))
request->sched.semaphores = 0;
/*
@@ -855,7 +865,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -961,15 +971,22 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
if (i915_request_started(signal))
return 0;
+ /*
+ * The caller holds a reference on @signal, but we do not serialise
+ * against it being retired and removed from the lists.
+ *
+ * We do not hold a reference to the request before @signal, and
+ * so must be very careful to ensure that it is not _recycled_ as
+ * we follow the link backwards.
+ */
fence = NULL;
rcu_read_lock();
- spin_lock_irq(&signal->lock);
do {
struct list_head *pos = READ_ONCE(signal->link.prev);
struct i915_request *prev;
/* Confirm signal has not been retired, the link is valid */
- if (unlikely(i915_request_started(signal)))
+ if (unlikely(__i915_request_has_started(signal)))
break;
/* Is signal the earliest request on its timeline? */
@@ -994,7 +1011,6 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
fence = &prev->fence;
} while (0);
- spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
@@ -1511,7 +1527,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
*/
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
- if (prev && !i915_request_completed(prev)) {
+ if (prev && !__i915_request_is_complete(prev)) {
/*
* The requests are supposed to be kept in order. However,
* we need to be wary in case the timeline->last_request
@@ -1582,6 +1598,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
return __i915_request_add_to_timeline(rq);
}
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+}
+
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -1598,8 +1620,10 @@ void __i915_request_queue(struct i915_request *rq,
*/
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
- i915_sw_fence_commit(&rq->semaphore);
- i915_sw_fence_commit(&rq->submit);
+
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
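With the local_bh bracket now inside __i915_request_queue(), a caller batching several submissions could, hypothetically, hoist it and use the new _bh variant directly:

	local_bh_disable();
	list_for_each_entry(rq, &requests, link) /* hypothetical list */
		__i915_request_queue_bh(rq);
	local_bh_enable(); /* kick the submission tasklets once */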
void i915_request_add(struct i915_request *rq)
@@ -1823,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
* for unhappy HW.
*/
if (i915_request_is_ready(rq))
- intel_engine_flush_submission(rq->engine);
+ __intel_engine_flush_submission(rq->engine, false);
for (;;) {
set_current_state(state);
@@ -1855,6 +1879,106 @@ out:
return timeout;
}
+static int print_sched_attr(const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return 'E';
+
+ if (i915_request_is_ready(rq))
+ return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+ return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return "!";
+
+ if (__i915_request_has_started(rq))
+ return "*";
+
+ if (!i915_sw_fence_signaled(&rq->semaphore))
+ return "&";
+
+ return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return "+";
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ return "-";
+
+ return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ /*
+ * The prefix is used to show the queue status, for which we use
+ * the following flags:
+ *
+ * U [Unready]
+ * - initial status upon being submitted by the user
+ *
+ * - the request is not ready for execution as it is waiting
+ * for external fences
+ *
+ * R [Ready]
+ * - all fences the request was waiting on have been signaled,
+ * and the request is now ready for execution and will be
+ * in a backend queue
+ *
+ * - a ready request may still need to wait on semaphores
+ * [internal fences]
+ *
+ * V [Ready/virtual]
+ * - same as ready, but queued over multiple backends
+ *
+ * E [Executing]
+ * - the request has been transferred from the backend queue and
+ * submitted for execution on HW
+ *
+ * - a completed request may still be regarded as executing, its
+ * status may not be updated until it is retired and removed
+ * from the lists
+ */
+
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+ prefix, indent, " ",
+ queue_status(rq),
+ rq->fence.context, rq->fence.seqno,
+ run_status(rq),
+ fence_status(rq),
+ buf,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ name);
+}
+
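For reference, a hypothetical line as this format would render it (all values invented):

	req  E 24:104* prio=2 @ 30ms: fence-name

i.e. prefix "req", executing ('E'), fence context 0x24, seqno 104, started ('*'), priority 2, emitted 30 ms ago, on timeline "fence-name".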
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 92adfee30c7c..1bfe214a47e9 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,6 +43,7 @@
struct drm_file;
struct drm_i915_gem_object;
+struct drm_printer;
struct i915_request;
struct i915_capture_list {
@@ -308,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
-void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+void __i915_request_queue_bh(struct i915_request *rq);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
@@ -371,6 +374,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
static inline bool i915_request_signaled(const struct i915_request *rq)
{
/* The request may live longer than its HWSP, so check flags first! */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..7144239f08df 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
- local_bh_disable();
-
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
- local_bh_enable(); /* kick submission tasklet */
-
return 0;
}
@@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node)
spin_unlock_irq(&schedule_lock);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ struct i915_dependency *dep;
+
+ i915_request_show(m, rq, prefix, indent);
+ if (i915_request_completed(rq))
+ return;
+
+ rcu_read_lock();
+ for_each_signaler(dep, rq) {
+ const struct i915_request *signaler =
+ node_to_request(dep->signaler);
+
+ /* Dependencies along the same timeline are expected. */
+ if (signaler->timeline == rq->timeline)
+ continue;
+
+ if (__i915_request_is_complete(signaler))
+ continue;
+
+ i915_request_show(m, signaler, prefix, indent + 2);
+ }
+ rcu_read_unlock();
+}
+
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..4501e5ac2637 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -13,6 +13,8 @@
#include "i915_scheduler_types.h"
+struct drm_printer;
+
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
list_for_each_entry(it, &(plist)->requests[idx], sched.link)
@@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f72e6c397b08..343ed44d5ed4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -81,4 +81,14 @@ struct i915_dependency {
#define I915_DEPENDENCY_WEAK BIT(2)
};
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
+
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index db2111fc809e..63212df33c9e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,6 +24,7 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "display/intel_de.h"
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
@@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
}
}
@@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
}
@@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
@@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
/* Display arbitration */
if (INTEL_GEN(dev_priv) <= 4)
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
 /* only restore FBC info on the platform that supports FBC */
intel_fbc_global_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 038d4c6884c5..2744558f3050 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,10 +18,15 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
-
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#define WQ_FLAG_BITS \
+ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
+
+/* after WQ_FLAG_* for safety */
+#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
+#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
+
enum {
DEBUG_FENCE_IDLE = 0,
DEBUG_FENCE_NOTIFY,
@@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
if (continuation) {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- if (pos->func == autoremove_wake_function)
- pos->func(pos, TASK_NORMAL, 0, continuation);
- else
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
list_move_tail(&pos->entry, continuation);
+ else
+ pos->func(pos, TASK_NORMAL, 0, continuation);
}
} else {
LIST_HEAD(extra);
@@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
list_for_each_entry_safe(pos, next, &x->head, entry) {
int wake_flags;
- wake_flags = fence->error;
- if (pos->func == autoremove_wake_function)
- wake_flags = 0;
+ wake_flags = 0;
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
+ wake_flags = fence->error;
pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
@@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_entry_t *wq, gfp_t gfp)
{
+ unsigned int pending;
unsigned long flags;
- int pending;
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
@@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
- pending = 0;
+ pending = I915_SW_FENCE_FLAG_FENCE;
if (!wq) {
wq = kmalloc(sizeof(*wq), gfp);
if (!wq) {
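
Note: the `pending` mask computed here is what makes the new wakeup checks above work; later in this function (not shown in the hunk) it seeds the waiter's flags, so entries added on behalf of a fence carry I915_SW_FENCE_FLAG_FENCE. Roughly, under that assumption:

	wq->flags = pending;		/* FENCE, plus ALLOC if kmalloc'ed */
	wq->func = i915_sw_fence_wake;
	wq->private = fence;
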
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016..f9e780dee9de 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
void cancel_timer(struct timer_list *t)
{
- if (!READ_ONCE(t->expires))
+ if (!timer_active(t))
return;
del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd..abd4dcd9f79c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
static inline bool timer_expired(const struct timer_list *t)
{
- return READ_ONCE(t->expires) && !timer_pending(t);
+ return timer_active(t) && !timer_pending(t);
}
/*
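
Note: timer_active() names the driver's existing sentinel: expires == 0 means "not in use", which is what lets timer_expired() distinguish an armed-and-fired timer from one that was never armed. The cancel path presumably clears the sentinel after deleting the timer, so the full helper would look roughly like:

	void cancel_timer(struct timer_list *t)
	{
		if (!timer_active(t))
			return;

		del_timer(t);
		WRITE_ONCE(t->expires, 0);	/* make timer_active() false */
	}
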
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c653454..a64adc8c883b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_vma_parked(struct intel_gt *gt);
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+ clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
#define for_each_until(cond) if (cond) break; else
/**
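
Note: a hypothetical call site (illustrative only; the real users live in the display and GEM code) would tag a VMA when its object is pinned for scanout and clear the bit when the pin is dropped:

	vma = i915_gem_object_pin_to_display_plane(obj, alignment, view, flags);
	if (!IS_ERR(vma))
		i915_vma_mark_scanout(vma);
	...
	if (i915_vma_is_scanout(vma))
		i915_vma_clear_scanout(vma);
	i915_vma_unpin(vma);
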
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b..f5cb848b7a7e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -249,6 +249,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
+
struct i915_active active;
#define I915_VMA_PAGES_BIAS 24
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e67cec8fa2aa..f2d5ae59081e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -117,150 +117,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u Hz\n",
- info->cs_timestamp_frequency_hz);
-}
-
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
-{
- u32 ts_override = intel_uncore_read(&dev_priv->uncore,
- GEN9_TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000000;
-
- frac_freq = ((ts_override &
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock = (rpm_config_reg &
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 f25_mhz = 25000000;
- u32 f38_4_mhz = 38400000;
- u32 crystal_clock = (rpm_config_reg &
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
-
- if (INTEL_GEN(dev_priv) <= 4) {
- /* PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
- } else if (INTEL_GEN(dev_priv) <= 8) {
- /* PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
- */
- return f12_5_mhz;
- } else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- /* First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
- */
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (INTEL_GEN(dev_priv) <= 10)
- freq = gen10_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
- else
- freq = gen11_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((rpm_config_reg &
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- }
-
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
}
#undef INTEL_VGA_DEVICE
@@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
- /* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_hz =
- read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_hz) {
- runtime->cs_timestamp_period_ns =
- i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
- drm_dbg(&dev_priv->drm,
- "CS timestamp wraparound in %lldms\n",
- div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
- S32_MAX),
- USEC_PER_SEC));
- }
-
if (!HAS_DISPLAY(dev_priv)) {
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d92fa041c700..cf2d528c6e9b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -123,7 +123,6 @@ enum intel_ppgtt_type {
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_logical_ring_preemption); \
func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
@@ -224,9 +223,6 @@ struct intel_runtime_info {
u8 num_scalers[I915_MAX_PIPES];
u32 rawclk_freq;
-
- u32 cs_timestamp_frequency_hz;
- u32 cs_timestamp_period_ns;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 4754296a250e..73d256fc6830 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
+#include "intel_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -201,22 +202,12 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
return -EINVAL;
}
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
@@ -269,16 +260,12 @@ skl_get_dram_info(struct drm_i915_private *i915)
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (dram_info->num_channels * mem_freq_khz == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}
- dram_info->valid = true;
return 0;
}
@@ -365,7 +352,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
- u8 num_active_channels;
+ u8 num_active_channels, valid_ranks = 0;
int i;
val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
@@ -375,10 +362,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (mem_freq_khz * num_active_channels == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
@@ -410,57 +394,125 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
- /*
- * If any of the channel is single rank channel,
- * worst case output will be same as if single rank
- * memory, so consider single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
- if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
return -EINVAL;
}
- dram_info->valid = true;
+ return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ }
+
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
return 0;
}
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+ int ret = skl_get_dram_info(i915);
+
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+ /* Always needed for GEN12+ */
+ i915->dram_info.wm_lv_0_adjust_needed = true;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
/*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
+	 * Assume the level 0 watermark latency adjustment is needed until proven
+	 * otherwise; this w/a is not needed on bxt/glk.
*/
- dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (IS_GEN9_LP(i915))
+ if (INTEL_GEN(i915) >= 12)
+ ret = gen12_get_dram_info(i915);
+ else if (INTEL_GEN(i915) >= 11)
+ ret = gen11_get_dram_info(i915);
+ else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
if (ret)
return;
- drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps, dram_info->num_channels);
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
- drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+ drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+ yesno(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
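
Note: the pcode reply packs the global memory info into a single dword: bits 3:0 hold the DRAM type (with gen12 using a different encoding than gen11), bits 7:4 the channel count, and bits 11:8 the number of QGV points. A worked decode for a hypothetical gen12 reply of val == 0x243:

	/* type     =  val & 0xf        == 0x3 -> INTEL_DRAM_LPDDR4
	 * channels = (val & 0xf0) >> 4 == 0x4 -> 4 channels
	 * QGV pts  = (val & 0xf00) >> 8 == 0x2 -> 2 QGV points
	 */
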
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a1026..1bfcdd89b241 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -10,7 +10,7 @@
#define REGION_MAP(type, inst) \
BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
-const u32 intel_region_map[] = {
+static const u32 intel_region_map[] = {
[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 232490d89a83..6ffc0673f005 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -51,21 +51,16 @@ enum intel_region_id {
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
for_each_if((mr) = (i915)->mm.regions[id])
-/**
- * Memory regions encoded as type | instance
- */
-extern const u32 intel_region_map[];
-
struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
void (*release)(struct intel_memory_region *mem);
- struct drm_i915_gem_object *
- (*create_object)(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+ int (*init_object)(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
};
struct intel_memory_region {
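
Note: with create_object() replaced by init_object(), allocation moves to common code: the caller allocates the object and the region backend only initializes it. A simplified sketch of the caller side under that split (the real flow may differ):

	struct drm_i915_gem_object *
	i915_gem_object_create_region(struct intel_memory_region *mem,
				      resource_size_t size, unsigned int flags)
	{
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_alloc();
		if (!obj)
			return ERR_PTR(-ENOMEM);

		err = mem->ops->init_object(mem, obj, size, flags);
		if (err) {
			i915_gem_object_free(obj);
			return ERR_PTR(err);
		}

		return obj;
	}
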
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index f31c0dabd0cc..ecaf314d60b6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -143,8 +143,9 @@ static bool intel_is_virt_pch(unsigned short id,
sdevice == PCI_SUBDEVICE_ID_QEMU));
}
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+ unsigned short *pch_id, enum intel_pch *pch_type)
{
unsigned short id = 0;
@@ -181,12 +182,21 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
else
drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
- return id;
+ *pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
}
void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
/* DG1 has south engine display on the same PCI device */
if (IS_DG1(dev_priv)) {
@@ -206,9 +216,6 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* of only checking the first one.
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
if (pch->vendor != PCI_VENDOR_ID_INTEL)
continue;
@@ -221,14 +228,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
break;
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && pch_type == PCH_NONE))
- id = 0;
-
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
@@ -244,10 +244,15 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
"Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
+ } else if (!pch) {
+ if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+ }
}
- if (!pch)
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
-
pci_dev_put(pch);
}
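
Note: the new branch covers guests where the hypervisor exposes the display but no ISA-bridge PCH device at all, so the loop above never matches. run_as_guest() is, approximately, a hypervisor check from i915_utils.h:

	static inline bool run_as_guest(void)
	{
		return !hypervisor_is_type(X86_HYPER_NATIVE);
	}
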
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a20b5051f18c..0c3e63f27c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
SKL_DE_COMPRESSED_HASH_MODE);
}
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
* Display WA #0859: skl,bxt,kbl,glk,cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
}
@@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
* FIXME:
* GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
*/
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
/*
@@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* is off and a MMIO access is attempted by any privilege
* application, using batch buffers or any other means.
*/
- I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
/*
* WaFbcTurnOffFbcWatermark:bxt
* Display WA #0562: bxt
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcHighMemBwCorruptionAvoidance:bxt
* Display WA #0883: bxt
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
* Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
@@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(CLKCFG);
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
@@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
}
/* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
@@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- POSTING_READ(FW_BLC_SELF_VLV);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- POSTING_READ(FW_BLC_SELF);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3);
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, val);
- POSTING_READ(DSPFW3);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- I915_WRITE(FW_BLC_SELF, val);
- POSTING_READ(FW_BLC_SELF);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_I915GM(dev_priv)) {
/*
 		 * FIXME can't find a bit like this for 915G, and
 		 * yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- I915_WRITE(INSTPM, val);
- POSTING_READ(INSTPM);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
} else {
return false;
}
@@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = I915_READ(DSPARB2);
- dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc)
wm = intel_calculate_wm(clock, &pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = I915_READ(DSPFW1);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- I915_WRITE(DSPFW1, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(clock, &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
@@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
(wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
FW_WM(wm->sr.fbc, FBC_SR) |
FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
(wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
FW_WM(wm->sr.cursor, CURSOR_SR) |
FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
FW_WM(wm->hpll.plane, HPLL_SR));
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#define FW_WM_VLV(value, plane) \
@@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(VLV_DDL(pipe),
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
(wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
@@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- I915_WRITE(DSPHOWM, 0);
- I915_WRITE(DSPHOWM1, 0);
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
- I915_WRITE(DSPFW7_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPFW8_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- I915_WRITE(DSPFW9_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
@@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- I915_WRITE(DSPFW7,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
@@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#undef FW_WM_VLV
@@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
FW_WM(8, CURSORB) |
FW_WM(8, PLANEB) |
FW_WM(8, PLANEA));
- I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
srwm = 1;
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- I915_WRITE(FW_BLC_SELF,
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
}
drm_dbg_kms(&dev_priv->drm,
@@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
if (enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
drm_dbg_kms(&dev_priv->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- I915_WRITE(FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2930,7 +2930,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 	 * any underrun. If DIMM info is not available, assume a 16GB DIMM
 	 * to avoid any underrun.
*/
- if (dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
previous->wm_lp[2] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
previous->wm_lp[1] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
previous->wm_lp[0] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
_ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(WM_MISC);
+ val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
} else {
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~DISP_DATA_PARTITION_5_6;
else
val |= DISP_DATA_PARTITION_5_6;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
}
if (dirty & WM_DIRTY_FBC) {
- val = I915_READ(DISP_ARB_CTL);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
else
val |= DISP_FBC_WM_DIS;
- I915_WRITE(DISP_ARB_CTL, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
}
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
@@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
u8 enabled_slices_mask = 0;
for (i = 0; i < max_slices; i++) {
- if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
enabled_slices_mask |= BIT(i);
}
@@ -4017,43 +4017,48 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-/*
- * Calculate initial DBuf slice offset, based on slice size
- * and mask(i.e if slice size is 1024 and second slice is enabled
- * offset would be 1024)
- */
-static unsigned int
-icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
- u32 slice_size,
- u32 ddb_size)
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
{
- unsigned int offset = 0;
+ int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- if (!dbuf_slice_mask)
- return 0;
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
- offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
- WARN_ON(offset >= ddb_size);
- return offset;
+ return ddb_size;
}
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+ return intel_dbuf_size(dev_priv) /
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
- if (INTEL_GEN(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(dev_priv);
- return ddb_size;
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
}
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
u32 slice_mask = 0;
- u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 ddb_size = intel_dbuf_size(dev_priv);
u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u16 slice_size = ddb_size / num_supported_slices;
u16 start_slice;
@@ -4077,116 +4082,40 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
return slice_mask;
}
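
Note: skl_ddb_entry_for_slices() maps a contiguous slice mask directly to a DDB range with ffs()/fls(). For example, on a platform with two 1024-block slices (slice_size == 1024):

	/* slice_mask == BIT(0)          -> ddb == { .start = 0,    .end = 1024 }
	 * slice_mask == BIT(1)          -> ddb == { .start = 1024, .end = 2048 }
	 * slice_mask == BIT(0) | BIT(1) -> ddb == { .start = 0,    .end = 2048 }
	 * slice_mask == 0               -> ddb == { 0, 0 }  (pipe owns no dbuf)
	 */
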
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes);
-
-static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- struct skl_ddb_entry *alloc, /* out */
- int *num_active /* out */)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
- const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
- enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(intel_state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(intel_state);
- u8 active_pipes = new_dbuf_state->active_pipes;
- u16 ddb_size;
- u32 ddb_range_size;
- u32 i;
- u32 dbuf_slice_mask;
- u32 offset;
- u32 slice_size;
- u32 total_slice_mask;
- u32 start, end;
- int ret;
-
- *num_active = hweight8(active_pipes);
-
- if (!crtc_state->hw.active) {
- alloc->start = 0;
- alloc->end = 0;
- return 0;
- }
-
- ddb_size = intel_get_ddb_size(dev_priv);
-
- slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
- /*
- * If the state doesn't change the active CRTC's or there is no
- * modeset request, then there's no need to recalculate;
- * the existing pipe allocation limits should remain unchanged.
- * Note that we're safe from racing commits since any racing commit
- * that changes the active CRTC list or do modeset would need to
- * grab _all_ crtc locks, including the one we currently hold.
- */
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
- !dev_priv->wm.distrust_bios_wm) {
- /*
- * alloc may be cleared by clear_intel_crtc_state,
- * copy from old state to be sure
- *
- * FIXME get rid of this mess
- */
- *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
+ if (!crtc_state->hw.active)
return 0;
- }
-
- /*
- * Get allowed DBuf slices for correspondent pipe and platform.
- */
- dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
-
- /*
- * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
- * and slice size is 1024, the offset would be 1024
- */
- offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
- slice_size, ddb_size);
-
- /*
- * Figure out total size of allowed DBuf slices, which is basically
- * a number of allowed slices for that pipe multiplied by slice size.
- * Inside of this
- * range ddb entries are still allocated in proportion to display width.
- */
- ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
/*
* Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, so instead of allocating DDB equally among pipes,
 	 * distribute DDB based on the resolution/width of the display.
*/
- total_slice_mask = dbuf_slice_mask;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- enum pipe pipe = crtc->pipe;
- int hdisplay, vdisplay;
- u32 pipe_dbuf_slice_mask;
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
- if (!crtc_state->hw.active)
- continue;
+ return hdisplay;
+}
- pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
- active_pipes);
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
- /*
- * According to BSpec pipe can share one dbuf slice with another
- * pipes or pipe can use multiple dbufs, in both cases we
- * account for other pipes only if they have exactly same mask.
- * However we need to account how many slices we should enable
- * in total.
- */
- total_slice_mask |= pipe_dbuf_slice_mask;
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ int weight = dbuf_state->weight[pipe];
/*
 		 * Do not account for pipes using other slice sets
@@ -4195,42 +4124,78 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* i.e no partial intersection), so it is enough to check for
* equality for now.
*/
- if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
continue;
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
- total_width_in_range += hdisplay;
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
- if (pipe < for_pipe)
- width_before_pipe_in_range += hdisplay;
- else if (pipe == for_pipe)
- pipe_width = hdisplay;
+ if (new_dbuf_state->weight[pipe] == 0) {
+ new_dbuf_state->ddb[pipe].start = 0;
+ new_dbuf_state->ddb[pipe].end = 0;
+ goto out;
}
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
+ skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+ if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
- end = ddb_range_size *
- (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- alloc->start = offset + start;
- alloc->end = offset + end;
+ crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
- for_crtc->base.id, for_crtc->name,
- dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
return 0;
}
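
Note: the split inside the slice range is purely weight (pipe width) proportional. A worked example: pipes A and B share a 2048-block range with weights 1920 and 3840 (weight_total == 5760). For pipe A, weight_start == 0 and weight_end == 1920; for pipe B, weight_start == 1920 and weight_end == 5760:

	/* pipe A: start = 2048 *    0 / 5760 == 0
	 *         end   = 2048 * 1920 / 5760 == 682
	 * pipe B: start = 2048 * 1920 / 5760 == 682
	 *         end   = 2048 * 5760 / 5760 == 2048
	 *
	 * i.e. A gets ddb (0 - 682) and B gets (682 - 2048), each offset
	 * by ddb_slices.start.
	 */
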
@@ -4300,12 +4265,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = I915_READ(CUR_BUF_CFG(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
if (val & PLANE_CTL_ENABLE)
@@ -4314,11 +4279,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ALPHA_MASK);
if (INTEL_GEN(dev_priv) >= 11) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
if (fourcc &&
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
@@ -4632,10 +4597,8 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -4798,55 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
- int num_active;
u32 blocks;
int level;
- int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (!crtc_state->hw.active) {
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- /*
- * FIXME hack to make sure we compute this sensibly when
- * turning off all the pipes. Otherwise we leave it at
- * whatever we had previously, and then runtime PM will
- * mess it up by turning off all but S1. Remove this
- * once the dbuf state computation flow becomes sane.
- */
- if (new_dbuf_state->active_pipes == 0) {
- new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
- }
-
- alloc->start = alloc->end = 0;
+ if (!crtc_state->hw.active)
return 0;
- }
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
@@ -4855,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
total_data_rate =
skl_get_total_relative_data_rate(state, crtc);
- ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
- total_data_rate,
- alloc, &num_active);
- if (ret)
- return ret;
-
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -5731,6 +5663,18 @@ static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
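
Note: skl_ddb_entry_union() treats end == 0 as "empty", so a union into an empty destination degenerates to a copy, and an empty source is a no-op:

	/* a == {0, 0},   b == {512, 1024} -> a == {512, 1024}  (copy)
	 * a == {0, 512}, b == {256, 1024} -> a == {0, 1024}    (true union)
	 * a == {0, 512}, b == {0, 0}      -> a unchanged       (b empty)
	 */
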
@@ -5775,39 +5719,114 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return 0;
}
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(dev_priv, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
- const struct intel_dbuf_state *new_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ }
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
}
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
drm_dbg_kms(&dev_priv->drm,
"Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
return 0;
}
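The control flow above follows the driver's global-state rules: intel_atomic_lock_global_state() suffices when a per-commit value changed (active_pipes, slices[], weight[]), because other commits only need mutual exclusion on the dbuf object, while intel_atomic_serialize_global_state() is required once enabled_slices changes, since powering dbuf slices on or off touches hardware shared by every pipe and must be ordered against all in-flight commits. A toy model of that decision (not the driver code; serialization is the strictly stronger requirement):

#include <stdio.h>
#include <stdint.h>

/* LOCK: other commits may run concurrently but must not touch this
 * object. SERIALIZE: this commit must be ordered against all others. */
enum protect { NONE, LOCK, SERIALIZE };

struct dbuf { uint8_t active_pipes, enabled_slices; };

static enum protect protection_needed(const struct dbuf *old,
				      const struct dbuf *new)
{
	if (old->enabled_slices != new->enabled_slices)
		return SERIALIZE;	/* shared hw gets reprogrammed */
	if (old->active_pipes != new->active_pipes)
		return LOCK;		/* only this object changed */
	return NONE;
}

int main(void)
{
	struct dbuf old = { .active_pipes = 0x1, .enabled_slices = 0x1 };
	struct dbuf new = { .active_pipes = 0x3, .enabled_slices = 0x3 };

	printf("%d\n", protection_needed(&old, &new));	/* 2 = SERIALIZE */
	return 0;
}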
@@ -5944,83 +5963,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
- u8 pipe_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- if ((pipe_mask & BIT(crtc->pipe)) == 0)
- continue;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i, ret;
-
- if (dev_priv->wm.distrust_bios_wm) {
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them, and it really
- * wants to recompute things when distrust_bios_wm is set
- * so we add all the pipes to the state.
- */
- ret = intel_add_affected_pipes(state, ~0);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_dbuf_state *new_dbuf_state;
- const struct intel_dbuf_state *old_dbuf_state;
-
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
- break;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them.
- */
- ret = intel_add_affected_pipes(state,
- new_dbuf_state->active_pipes);
- if (ret)
- return ret;
-
- break;
- }
-
- return 0;
-}
-
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6088,15 +6030,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
int ret, i;
- ret = skl_ddb_add_affected_pipes(state);
- if (ret)
- return ret;
-
- /*
- * Calculate WM's for all pipes that are part of this transaction.
- * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified if pipe allocations had to change.
- */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
if (ret)
@@ -6231,9 +6164,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for (level = 0; level <= max_level; level++) {
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, plane_id, level));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
else
- val = I915_READ(CUR_WM(pipe, level));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
@@ -6242,9 +6175,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
wm->sagv_wm0 = wm->wm[0];
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
- val = I915_READ(CUR_WM_TRANS(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
@@ -6255,20 +6188,49 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
- }
- if (dev_priv->active_pipes) {
- /* Fully recompute DDB on first atomic commit */
- dev_priv->wm.distrust_bios_wm = true;
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+ plane_id, ddb_y, ddb_uv);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
+ }
+
+ dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
}
+
+ dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
@@ -6280,7 +6242,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -6324,13 +6286,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -6338,7 +6300,7 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
@@ -6352,7 +6314,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
u32 tmp;
for_each_pipe(dev_priv, pipe) {
- tmp = I915_READ(VLV_DDL(pipe));
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -6364,34 +6326,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
- tmp = I915_READ(DSPFW7_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPFW8_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = I915_READ(DSPFW9_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -6403,11 +6365,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = I915_READ(DSPFW7);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -6428,7 +6390,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
g4x_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -6572,7 +6534,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -6719,9 +6681,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
/*
* Don't touch WM1S_LP_EN here.
@@ -6739,25 +6701,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
- hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
- hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
- hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
if (INTEL_GEN(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
@@ -6808,14 +6770,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
if (!HAS_IPC(dev_priv))
return;
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (dev_priv->ipc_enabled)
val |= DISP_IPC_ENABLE;
else
val &= ~DISP_IPC_ENABLE;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
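Most of the churn in this file is the mechanical I915_READ()/I915_WRITE() to intel_uncore_read()/intel_uncore_write() conversion: the dev_priv the old macros captured implicitly becomes an explicit &dev_priv->uncore argument. For read-modify-write sequences like the one in intel_enable_ipc() above, intel_uncore_rmw() (already used by this patch, e.g. in icl_init_clock_gating() below) can express the same update in one call; a sketch, not part of the patch:

/* equivalent RMW form of the DISP_ARB_CTL2 update above (sketch) */
static void intel_enable_ipc_rmw(struct drm_i915_private *dev_priv)
{
	u32 set = dev_priv->ipc_enabled ? DISP_IPC_ENABLE : 0;
	u32 clear = set ? 0 : DISP_IPC_ENABLE;

	intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, clear, set);
}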
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
@@ -6850,7 +6812,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
@@ -6858,12 +6820,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
+ intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
- POSTING_READ(DSPSURF(pipe));
+ intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+ intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
}
}
@@ -6879,10 +6841,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(PCH_3DCGDIS0,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
/*
@@ -6892,12 +6854,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
+ (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
/*
@@ -6909,18 +6871,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
}
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
g4x_disable_trickle_feed(dev_priv);
@@ -6938,27 +6900,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, seen only on the LVDS panels of some HP laptops with IVY.
*/
for_each_pipe(dev_priv, pipe) {
- val = I915_READ(TRANS_CHICKEN2(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
- I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
@@ -6967,7 +6929,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(MCH_SSKPD);
+ tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
@@ -6978,14 +6940,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -7002,7 +6964,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
* WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -7017,14 +6979,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
*
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
@@ -7042,23 +7004,23 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(dev_priv))
- I915_WRITE(SOUTH_DSPCLK_GATE_D,
- I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
- I915_WRITE(TRANS_CHICKEN1(PIPE_A),
- I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -7070,60 +7032,62 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
u32 val;
/* WaTempDisableDOPClkGating:bdw */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- val = I915_READ(GEN8_L3SQCREG1);
+ val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
- I915_WRITE(GEN8_L3SQCREG1, val);
+ intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
* See the definition of L3SQCREG1 in BSpec.
*/
- POSTING_READ(GEN8_L3SQCREG1);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
- I915_WRITE(ILK_DPFC_CHICKEN,
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
	/* This is not a Wa. Enable to reduce Sampler power */
- I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
- I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
	/* Wa_14010594013:icl,ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
}
-static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl */
- I915_WRITE(ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ /* Wa_1409120013:tgl,rkl,adl_s,dg1 */
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
	/* Wa_1409825376:tgl (pre-prod) */
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
- /* Wa_14011059788:tgl */
+ /* Wa_14011059788:tgl,rkl,adl_s,dg1 */
intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
0, DFR_DISABLE);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ gen12lp_init_clock_gating(dev_priv);
+
/* Wa_1409836686:dg1[a0] */
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
@@ -7133,7 +7097,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -7143,35 +7107,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
	/* This is not a Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
/* WaEnableChickenDCPR:cnl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:cnl
* Display WA #0859: cnl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
- val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
- val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
val |= GWUNIT_CLKGATE_DIS;
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
/* WaDisableVFclkgate:cnl */
/* WaVFUnitClockGatingDisable:cnl */
- val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
val |= VFUNIT_CLKGATE_DIS;
- I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7180,21 +7144,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:cfl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:cfl
* Display WA #0562: cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:cfl
* Display WA #0873: cfl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7203,31 +7167,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:kbl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/*
* WaFbcTurnOffFbcWatermark:kbl
* Display WA #0562: kbl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:kbl
* Display WA #0873: kbl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7236,32 +7200,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableDopClockGating:skl */
- I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
~GEN7_DOP_CLOCK_GATE_ENABLE);
/* WAC6entrylatency:skl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:skl
* Display WA #0562: skl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:skl
* Display WA #0873: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -7270,42 +7234,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* WaSwitchSolVfFArbitrationPriority:bdw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* WaPsrDPAMaskVBlankInSRD:bdw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(CHICKEN_PIPESL_1(pipe),
- I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
/* WaKVMNotificationOnConfigChange:bdw */
- I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(dev_priv);
@@ -7315,24 +7279,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
* Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
* clock gating.
*/
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* This is required by WaCatErrorRejectionIssue:hsw */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaSwitchSolVfFArbitrationPriority:hsw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
lpt_init_clock_gating(dev_priv);
}
@@ -7341,26 +7305,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 snpcr;
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
/* WaDisableBackToBackFlipFix:ivb */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
if (IS_IVB_GT1(dev_priv))
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
@@ -7368,20 +7332,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
g4x_disable_trickle_feed(dev_priv);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev_priv);
@@ -7392,58 +7356,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableBackToBackFlipFix:vlv */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisableDopClockGating:vlv */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
* Set bit 25, to disable L3_BANK_2x_CLK_GATING */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
+ intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
* WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+ intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
/* WaDisableSemaphoreAndSyncFlipWait:chv */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSDEUnitClockGating:chv */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
@@ -7458,17 +7422,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate;
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
+ intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7489,49 +7453,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 dstate = I915_READ(D_STATE);
+ u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
+ intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
if (IS_PINEVIEW(dev_priv))
- I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
@@ -7541,13 +7505,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
	 * absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
- I915_WRITE(SCPD0,
+ intel_uncore_write(&dev_priv->uncore, SCPD0,
_MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
@@ -7583,7 +7547,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
else if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+ dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd5..97550cf0b6df 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,8 +9,10 @@
#include <linux/types.h>
#include "display/intel_bw.h"
+#include "display/intel_display.h"
#include "display/intel_global_state.h"
+#include "i915_drv.h"
#include "i915_reg.h"
struct drm_device;
@@ -40,7 +42,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
@@ -69,6 +70,10 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
struct intel_dbuf_state {
struct intel_global_state base;
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+
u8 enabled_slices;
u8 active_pipes;
};
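The three new per-pipe arrays mirror what skl_compute_ddb() and skl_wm_get_hw_state() now track: the span each pipe owns (ddb[]), its relative share (weight[]), and the slices it sits on (slices[]). Assuming skl_crtc_allocate_ddb() splits the available blocks proportionally to the weights, which is what feeding it intel_crtc_ddb_weight() suggests, the arithmetic looks roughly like this (the block count and rounding are illustrative, not the driver's exact formula):

#include <stdio.h>

int main(void)
{
	const unsigned int total = 512;		/* example ddb block count */
	const unsigned int weight[] = { 3, 1 };	/* two active pipes */
	unsigned int sum = weight[0] + weight[1];
	unsigned int start = 0;

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int size = total * weight[i] / sum;

		printf("pipe %u: [%u, %u)\n", i, start, start + size);
		start += size;	/* pipes get adjacent, disjoint spans */
	}
	return 0;	/* prints [0, 384) and [384, 512) */
}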
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 02ebf5a04a9b..0ec0cf191955 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
lockdep_assert_held(&i915->sb_lock);
/*
- * GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
+ * GEN6_PCODE_* are outside of the forcewake domain, we can use
+ * intel_uncore_read/write_fw variants to reduce the amount of work
* required when reading/writing.
*/
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1c14a07eba7d..9ac501bcfdad 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ_FW(reg) & mask) == value
+ * (intel_uncore_read_fw(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
* For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
@@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ(reg) & mask) == value
+ * (intel_uncore_read(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @timeout_ms milliseconds.
*
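The kernel-doc fixups above restate the wait contract in terms of the new accessors; the contract itself is just "read, mask, compare, repeat until timeout". A freestanding sketch of that loop, not the driver's implementation (which additionally handles atomic context, forcewake, and a fast/slow timeout split):

#include <stdbool.h>
#include <stdint.h>

/* returns true once (read_reg(reg) & mask) == value, false after
 * `tries` polls; read_reg() stands in for intel_uncore_read() */
static bool wait_for_register(uint32_t (*read_reg)(uint32_t), uint32_t reg,
			      uint32_t mask, uint32_t value,
			      unsigned int tries)
{
	while (tries--) {
		if ((read_reg(reg) & mask) == value)
			return true;
		/* a real implementation would udelay()/usleep_range() here */
	}
	return false;
}

static uint32_t counter;
static uint32_t read_fake(uint32_t reg) { (void)reg; return counter++; }

int main(void)
{
	return wait_for_register(read_fake, 0, 0xf, 0x3, 16) ? 0 : 1;
}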
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bd2467284295..59f0da8f1fbb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
/*
* Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
+ * Must be used with intel_uncore_read_fw() and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
@@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false)
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * uncore->funcs.mmio_writeq.
+ * machine death. For this reason we do not support intel_uncore_write64,
+ * or uncore->funcs.mmio_writeq.
*
* When reading a 64-bit value as two 32-bit values, the delay may cause
* the two reads to mismatch, e.g. a timestamp overflowing. Also note that
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f88473d396f4..f99bb0113726 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
struct list_head *objects)
{
/* quirk is only for live tiled objects, use it to declare ownership */
- GEM_BUG_ON(obj->mm.quirked);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
list_add(&obj->st_link, objects);
}
@@ -85,7 +85,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (vma->obj->mm.quirked)
+ if (i915_gem_object_has_tiling_quirk(vma->obj))
i915_vma_unpin(vma);
}
@@ -94,8 +94,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, list, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
i915_gem_object_put(obj);
}
@@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct file *file;
-
- file = mock_file(i915);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- break;
- }
count = 0;
onstack_fence_init(&fence);
do {
+ struct intel_context *ce;
struct i915_request *rq;
- struct i915_gem_context *ctx;
- ctx = live_context(i915, file);
- if (IS_ERR(ctx))
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
break;
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
igt_evict_ctl.fail_if_busy = false;
+ intel_context_put(ce);
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
@@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg)
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
-
- fput(file);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 713770fb2b92..c1adea8765a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -28,6 +28,7 @@
#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_random.h"
#include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index debbac660519..e9d86dab8677 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -262,7 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
+ delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e424a6d1a68c..d2a678a2497e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"
@@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a)
static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
{
- u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
}
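The switch from i915_cs_timestamp_ticks_to_ns() to intel_gt_clock_interval_to_ns() sources the conversion from the GT's clock rather than a per-device constant; either way the arithmetic is ns = ticks * NSEC_PER_SEC / clock_hz. A worked example, assuming a 19.2 MHz timestamp clock (a common value on recent parts, not taken from this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t clock_hz = 19200000;	/* assumed 19.2 MHz */
	const uint64_t ticks = 960;

	/* 960 ticks / 19.2 MHz = 50 us = 50000 ns */
	printf("%llu ns\n",
	       (unsigned long long)(ticks * 1000000000ull / clock_hz));
	return 0;
}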
@@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ec0ecb4e4ca6..83f6e5f31fb3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
100) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 0aeba8e3af28..ce7adfa3bca0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr = -1;
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (addr != -1 && sg_dma_address(sg) != addr)
+ return false;
+
+ addr = sg_dma_address(sg) + sg_dma_len(sg);
+ }
+
+ return true;
+}
+
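is_contiguous() replaces the earlier nents == 1 check, which was too strict: a physically contiguous object may be described by several scatterlist entries (for instance when each entry is capped at the max segment size, as exercised by igt_mock_max_segment() below), provided every entry starts exactly where the previous one ended. A standalone illustration of the walk, with plain arrays standing in for the scatterlist:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct seg { uint64_t addr, len; };	/* stand-in for one sg entry */

/* same invariant as is_contiguous(): each segment begins at the end
 * of the previous one */
static bool segs_contiguous(const struct seg *s, int n)
{
	uint64_t expect = s[0].addr;

	for (int i = 0; i < n; i++) {
		if (s[i].addr != expect)
			return false;
		expect = s[i].addr + s[i].len;
	}
	return true;
}

int main(void)
{
	struct seg ok[]  = { { 0x1000, 0x1000 }, { 0x2000, 0x2000 } };
	struct seg bad[] = { { 0x1000, 0x1000 }, { 0x4000, 0x1000 } };

	printf("%d %d\n", segs_contiguous(ok, 2), segs_contiguous(bad, 2));
	return 0;	/* prints "1 0" */
}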
static int igt_mock_contiguous(void *arg)
{
struct intel_memory_region *mem = arg;
@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s min object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s min object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s max object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s max object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects;
}
- if (obj->mm.pages->nents != 1) {
- pr_err("%s object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -337,6 +352,68 @@ out_put:
return err;
}
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ const unsigned int max_segment = i915_sg_segment_size();
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ struct scatterlist *sg;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+	 * dma-mapping of scatterlist elements has an implicit limit of
+ * UINT_MAX on each element.
+ */
+
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ size = 0;
+ list_for_each_entry(block, &obj->mm.blocks, link) {
+ if (i915_buddy_block_size(&mem->mm, block) > size)
+ size = i915_buddy_block_size(&mem->mm, block);
+ }
+ if (size < max_segment) {
+ pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+ __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
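igt_mock_max_segment() checks the other half of the same contract: however large a buddy block gets, the scatterlist built from it must never carry an entry longer than i915_sg_segment_size(). The splitting this implies is plain chunking; a sketch of the arithmetic being asserted (the helper is hypothetical, not the driver's sg builder, and the cap value is only an example):

#include <stdio.h>
#include <stdint.h>

/* split one block into chunks of at most max_segment bytes */
static unsigned int count_segments(uint64_t block, uint32_t max_segment)
{
	unsigned int n = 0;

	while (block) {
		uint64_t len = block < max_segment ? block : max_segment;

		block -= len;
		n++;
	}
	return n;
}

int main(void)
{
	/* an 8 GiB block with a near-UINT_MAX cap needs 3 entries */
	printf("%u segments\n", count_segments(8ull << 30, 0xffff0000u));
	return 0;
}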
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
}
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ if (t[0] <= 0) {
+ /* ignore the impossible to protect our sanity */
+ pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ t[0], t[4]);
+ continue;
+ }
+
pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
__func__,
- src_mr->name,
- repr_type(src_type),
- dst_mr->name,
- repr_type(dst_type),
- tests[i].name,
- size >> 10,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
div64_u64(mul_u32_u32(4 * size,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
@@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e946bd2087d8..0188f877cab2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
- i915_gem_driver_release__contexts(i915);
-
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 979d96f27c43..3c6021415274 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,21 +15,16 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-static struct drm_i915_gem_object *
-mock_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int mock_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
if (size > mem->mm.size)
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
+ return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
@@ -40,13 +35,13 @@ mock_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
static const struct intel_memory_region_ops mock_region_ops = {
.init = intel_memory_region_init_buddy,
.release = intel_memory_region_release_buddy,
- .create_object = mock_object_create,
+ .init_object = mock_object_init,
};
struct intel_memory_region *