Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 35
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.debug | 15
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.profile | 27
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 201
-rw-r--r-- drivers/gpu/drm/i915/Makefile.header-test | 37
-rw-r--r-- drivers/gpu/drm/i915/display/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/display/Makefile.header-test | 16
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ch7017.c (renamed from drivers/gpu/drm/i915/dvo_ch7017.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ch7xxx.c (renamed from drivers/gpu/drm/i915/dvo_ch7xxx.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ivch.c (renamed from drivers/gpu/drm/i915/dvo_ivch.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_ns2501.c (renamed from drivers/gpu/drm/i915/dvo_ns2501.c) | 5
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_sil164.c (renamed from drivers/gpu/drm/i915/dvo_sil164.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/dvo_tfp410.c (renamed from drivers/gpu/drm/i915/dvo_tfp410.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c (renamed from drivers/gpu/drm/i915/icl_dsi.c) | 171
-rw-r--r-- drivers/gpu/drm/i915/display/intel_acpi.c (renamed from drivers/gpu/drm/i915/intel_acpi.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_acpi.h | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c (renamed from drivers/gpu/drm/i915/intel_atomic.c) | 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.h | 49
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c (renamed from drivers/gpu/drm/i915/intel_atomic_plane.c) | 72
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.h (renamed from drivers/gpu/drm/i915/intel_atomic_plane.h) | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c (renamed from drivers/gpu/drm/i915/intel_audio.c) | 61
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.h (renamed from drivers/gpu/drm/i915/intel_audio.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c (renamed from drivers/gpu/drm/i915/intel_bios.c) | 212
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h (renamed from drivers/gpu/drm/i915/intel_bios.h) | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 421
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c (renamed from drivers/gpu/drm/i915/intel_cdclk.c) | 296
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h (renamed from drivers/gpu/drm/i915/intel_cdclk.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c (renamed from drivers/gpu/drm/i915/intel_color.c) | 248
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.h (renamed from drivers/gpu/drm/i915/intel_color.h) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c (renamed from drivers/gpu/drm/i915/intel_combo_phy.c) | 87
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.h | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_connector.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.h (renamed from drivers/gpu/drm/i915/intel_connector.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c (renamed from drivers/gpu/drm/i915/intel_crt.c) | 44
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.h (renamed from drivers/gpu/drm/i915/intel_crt.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c (renamed from drivers/gpu/drm/i915/intel_ddi.c) | 93
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h (renamed from drivers/gpu/drm/i915/intel_ddi.h) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c (renamed from drivers/gpu/drm/i915/intel_display.c) | 1774
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h (renamed from drivers/gpu/drm/i915/intel_display.h) | 90
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 4618
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 288
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c (renamed from drivers/gpu/drm/i915/intel_dp.c) | 310
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h (renamed from drivers/gpu/drm/i915/intel_dp.h) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c (renamed from drivers/gpu/drm/i915/intel_dp_aux_backlight.c) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c (renamed from drivers/gpu/drm/i915/intel_dp_link_training.c) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c (renamed from drivers/gpu/drm/i915/intel_dp_mst.c) | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c (renamed from drivers/gpu/drm/i915/intel_dpio_phy.c) | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.h | 58
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c (renamed from drivers/gpu/drm/i915/intel_dpll_mgr.c) | 87
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h (renamed from drivers/gpu/drm/i915/intel_dpll_mgr.h) | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.h (renamed from drivers/gpu/drm/i915/intel_dsi.h) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c (renamed from drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c (renamed from drivers/gpu/drm/i915/intel_dsi_vbt.c) | 364
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c (renamed from drivers/gpu/drm/i915/intel_dvo.c) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.h (renamed from drivers/gpu/drm/i915/intel_dvo.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo_dev.h (renamed from drivers/gpu/drm/i915/dvo.h) | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c (renamed from drivers/gpu/drm/i915/intel_fbc.c) | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.h (renamed from drivers/gpu/drm/i915/intel_fbc.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fbdev.c) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.h (renamed from drivers/gpu/drm/i915/intel_fbdev.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.c (renamed from drivers/gpu/drm/i915/intel_fifo_underrun.c) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c (renamed from drivers/gpu/drm/i915/intel_frontbuffer.c) | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h (renamed from drivers/gpu/drm/i915/intel_frontbuffer.h) | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c (renamed from drivers/gpu/drm/i915/intel_i2c.c) | 100
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c (renamed from drivers/gpu/drm/i915/intel_hdcp.c) | 55
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.h (renamed from drivers/gpu/drm/i915/intel_hdcp.h) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c (renamed from drivers/gpu/drm/i915/intel_hdmi.c) | 175
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.h (renamed from drivers/gpu/drm/i915/intel_hdmi.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c (renamed from drivers/gpu/drm/i915/intel_hotplug.c) | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c (renamed from drivers/gpu/drm/i915/intel_lpe_audio.c) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.h | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c (renamed from drivers/gpu/drm/i915/intel_lspcon.c) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.h (renamed from drivers/gpu/drm/i915/intel_lspcon.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c (renamed from drivers/gpu/drm/i915/intel_lvds.c) | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.h (renamed from drivers/gpu/drm/i915/intel_lvds.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c (renamed from drivers/gpu/drm/i915/intel_opregion.c) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.h (renamed from drivers/gpu/drm/i915/intel_opregion.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c (renamed from drivers/gpu/drm/i915/intel_overlay.c) | 40
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.h | 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c (renamed from drivers/gpu/drm/i915/intel_panel.c) | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h (renamed from drivers/gpu/drm/i915/intel_panel.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pipe_crc.c (renamed from drivers/gpu/drm/i915/intel_pipe_crc.c) | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pipe_crc.h (renamed from drivers/gpu/drm/i915/intel_pipe_crc.h) | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c (renamed from drivers/gpu/drm/i915/intel_psr.c) | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h (renamed from drivers/gpu/drm/i915/intel_psr.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c (renamed from drivers/gpu/drm/i915/intel_quirks.c) | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.h | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c (renamed from drivers/gpu/drm/i915/intel_sdvo.c) | 34
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.h (renamed from drivers/gpu/drm/i915/intel_sdvo.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo_regs.h (renamed from drivers/gpu/drm/i915/intel_sdvo_regs.h) | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c (renamed from drivers/gpu/drm/i915/intel_sprite.c) | 45
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.h (renamed from drivers/gpu/drm/i915/intel_sprite.h) | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c (renamed from drivers/gpu/drm/i915/intel_tv.c) | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.h (renamed from drivers/gpu/drm/i915/intel_tv.h) | 0
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h (renamed from drivers/gpu/drm/i915/intel_vbt_defs.h) | 633
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c (renamed from drivers/gpu/drm/i915/intel_vdsc.c) | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.h | 21
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c (renamed from drivers/gpu/drm/i915/vlv_dsi.c) | 230
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/vlv_dsi_pll.c) | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/Makefile | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/Makefile.header-test | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 139
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_clflush.c (renamed from drivers/gpu/drm/i915/i915_gem_clflush.c) | 34
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_clflush.h | 20
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 304
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_client_blt.h | 21
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c (renamed from drivers/gpu/drm/i915/i915_gem_context.c) | 1200
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h (renamed from drivers/gpu/drm/i915/i915_gem_context.h) | 106
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context_types.h (renamed from drivers/gpu/drm/i915/i915_gem_context_types.h) | 59
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.c) | 42
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 796
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (renamed from drivers/gpu/drm/i915/i915_gem_execbuffer.c) | 359
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_fence.c | 96
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_internal.c (renamed from drivers/gpu/drm/i915/i915_gem_internal.c) | 34
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 52
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 508
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 398
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 430
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 107
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.h | 24
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 262
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 544
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 212
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 294
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.h | 25
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 571
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c (renamed from drivers/gpu/drm/i915/i915_gem_shrinker.c) | 181
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c (renamed from drivers/gpu/drm/i915/i915_gem_stolen.c) | 41
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 73
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c (renamed from drivers/gpu/drm/i915/i915_gem_tiling.c) | 31
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c (renamed from drivers/gpu/drm/i915/i915_gem_userptr.c) | 40
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 278
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.c (renamed from drivers/gpu/drm/i915/i915_gemfs.c) | 22
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.h | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c (renamed from drivers/gpu/drm/i915/selftests/huge_gem_object.c) | 24
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h | 27
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c (renamed from drivers/gpu/drm/i915/selftests/huge_pages.c) | 105
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 127
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_coherency.c) | 56
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_context.c) | 379
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c) | 35
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_object.c) | 237
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 99
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 110
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 80
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 34
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h | 17
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.c (renamed from drivers/gpu/drm/i915/selftests/mock_context.c) | 45
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.h | 24
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c (renamed from drivers/gpu/drm/i915/selftests/mock_dmabuf.c) | 22
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h | 22
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h (renamed from drivers/gpu/drm/i915/selftests/mock_gem_object.h) | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/Makefile.header-test | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c (renamed from drivers/gpu/drm/i915/intel_breadcrumbs.c) | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 241
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 134
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h (renamed from drivers/gpu/drm/i915/intel_context_types.h) | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h (renamed from drivers/gpu/drm/i915/intel_ringbuffer.h) | 81
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c (renamed from drivers/gpu/drm/i915/intel_engine_cs.c) | 521
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 168
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 22
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h (renamed from drivers/gpu/drm/i915/intel_engine_types.h) | 60
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gpu_commands.h (renamed from drivers/gpu/drm/i915/intel_gpu_commands.h) | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 143
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 27
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_hangcheck.c (renamed from drivers/gpu/drm/i915/intel_hangcheck.c) | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c (renamed from drivers/gpu/drm/i915/intel_lrc.c) | 1376
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.h (renamed from drivers/gpu/drm/i915/intel_lrc.h) | 34
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc_reg.h (renamed from drivers/gpu/drm/i915/intel_lrc_reg.h) | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c (renamed from drivers/gpu/drm/i915/intel_mocs.c) | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.h (renamed from drivers/gpu/drm/i915/intel_mocs.h) | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c (renamed from drivers/gpu/drm/i915/i915_reset.c) | 226
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.h (renamed from drivers/gpu/drm/i915/i915_reset.h) | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c (renamed from drivers/gpu/drm/i915/intel_ringbuffer.c) | 468
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 159
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.h | 75
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c (renamed from drivers/gpu/drm/i915/intel_workarounds.c) | 407
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.h (renamed from drivers/gpu/drm/i915/intel_workarounds.h) | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds_types.h (renamed from drivers/gpu/drm/i915/intel_workarounds_types.h) | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c (renamed from drivers/gpu/drm/i915/selftests/mock_engine.c) | 67
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.h (renamed from drivers/gpu/drm/i915/selftests/mock_engine.h) | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_cs.c (renamed from drivers/gpu/drm/i915/selftests/intel_engine_cs.c) | 0
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c (renamed from drivers/gpu/drm/i915/selftests/intel_hangcheck.c) | 270
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c (renamed from drivers/gpu/drm/i915/selftests/intel_lrc.c) | 583
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_reset.c | 118
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c (renamed from drivers/gpu/drm/i915/selftests/intel_workarounds.c) | 497
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 27
-rw-r--r-- drivers/gpu/drm/i915/gvt/debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/firmware.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/sched_policy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 190
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 96
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_active_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 555
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.h | 20
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 161
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 1033
-rw-r--r-- drivers/gpu/drm/i915/i915_fixed.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 4076
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_clflush.h | 36
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 49
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 207
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence_reg.h | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 1014
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 167
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_object.c | 90
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_object.h | 509
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gemfs.h | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_globals.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 142
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 170
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.h | 117
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 63
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 100
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 66
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 107
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 548
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.h | 127
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 91
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.h | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 65
-rw-r--r-- drivers/gpu/drm/i915/i915_timeline.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_timeline.h | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_timeline_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 187
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 134
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 38
-rw-r--r-- drivers/gpu/drm/i915/intel_context.c | 270
-rw-r--r-- drivers/gpu/drm/i915/intel_context.h | 87
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 411
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 78
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 90
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 447
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.c | 196
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.h | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_ads.c | 167
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_ads.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_ct.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_ct.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fw.c | 117
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fwif.h | 201
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_log.c | 23
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_reg.h | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.c | 62
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_huc.c | 102
-rw-r--r-- drivers/gpu/drm/i915/intel_huc.h | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_huc_fw.c | 73
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 567
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 4254
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.h | 213
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 483
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.h | 141
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.c | 148
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_uc_fw.c | 126
-rw-r--r-- drivers/gpu/drm/i915/intel_uc_fw.h | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 55
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c | 138
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.h | 164
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.h | 15
-rw-r--r-- drivers/gpu/drm/i915/selftests/huge_gem_object.h | 45
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c | 14
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 35
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 34
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 31
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 86
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_timeline.c | 30
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 258
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_atomic.h | 56
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 38
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.c | 11
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 20
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.h | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_guc.c | 11
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_context.h | 42
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_dmabuf.h | 41
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 48
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_request.c | 6
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_timeline.c | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/scatterlist.c | 3
322 files changed, 27149 insertions, 19274 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 255f224db64b..0d21402945ab 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -45,19 +45,28 @@ config DRM_I915
config DRM_I915_ALPHA_SUPPORT
bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
- default n
help
- Choose this option if you have new Intel hardware and want to enable
- the alpha quality i915 driver support for the hardware in this kernel
- version. You can also enable the support at runtime using the module
- parameter i915.alpha_support=1; this option changes the default for
- that module parameter.
+ This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
- It is recommended to upgrade to a kernel version with proper support
- as soon as it is available. Generally fixes for platforms with alpha
- support are not backported to older kernels.
+config DRM_I915_FORCE_PROBE
+ string "Force probe driver for selected new Intel hardware"
+ depends on DRM_I915
+ default "*" if DRM_I915_ALPHA_SUPPORT
+ help
+ This is the default value for the i915.force_probe module
+ parameter. Using the module parameter overrides this option.
- If in doubt, say "N".
+ Force probe the driver for new Intel graphics devices that are
+ recognized but not properly supported by this kernel version. It is
+ recommended to upgrade to a kernel version with proper support as soon
+ as it is available.
+
+ Use "" to disable force probe. If in doubt, use this.
+
+ Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed
+ devices. For example, "4500" or "4500,4571".
+
+ Use "*" to force probe the driver for all known devices.
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
@@ -133,3 +142,9 @@ depends on DRM_I915
depends on EXPERT
source "drivers/gpu/drm/i915/Kconfig.debug"
endmenu
+
+menu "drm/i915 Profile Guided Optimisation"
+ visible if EXPERT
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.profile"
+endmenu
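
[Editor's note: the force-probe string documented in the DRM_I915_FORCE_PROBE help text above is either empty, "*", or a comma-separated list of hex PCI device IDs. A minimal sketch of how such a string could be matched against a device ID, assuming <linux/kernel.h> and <linux/string.h>; this illustrates only the documented format, not the driver's actual probe code, and the helper name force_probe_allowed is hypothetical:

        static bool force_probe_allowed(u16 devid, const char *list)
        {
                char id[8];
                size_t len;
                const char *p = list;

                if (!p || !*p)          /* "" disables force probe */
                        return false;
                if (!strcmp(p, "*"))    /* "*" matches every known device */
                        return true;

                /* otherwise: comma-separated hex IDs, e.g. "4500,4571" */
                len = snprintf(id, sizeof(id), "%x", devid);
                while (*p) {
                        const char *end = strchrnul(p, ',');

                        if ((size_t)(end - p) == len && !strncmp(p, id, len))
                                return true;
                        p = *end ? end + 1 : end;
                }
                return false;
        }
]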
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 04b686d2c2d0..8d922bb4d953 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
depends on DRM_I915
select DEBUG_FS
select PREEMPT_COUNT
+ select REFCOUNT_FULL
select I2C_CHARDEV
select STACKDEPOT
select DRM_DP_AUX_CHARDEV
@@ -32,6 +33,7 @@ config DRM_I915_DEBUG
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -41,6 +43,19 @@ config DRM_I915_DEBUG
If in doubt, say "N".
+config DRM_I915_DEBUG_MMIO
+ bool "Always insert extra checks around mmio access by default"
+ default n
+ help
+ By default, always enables the extra sanity checks (extra register
+ reads) around every mmio (register) access that will slow the system
+ down. This sets the default value of i915.mmio_debug to -1 and can
+ be overridden at module load.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
new file mode 100644
index 000000000000..48df8889a88a
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -0,0 +1,27 @@
+config DRM_I915_USERFAULT_AUTOSUSPEND
+ int "Runtime autosuspend delay for userspace GGTT mmaps (ms)"
+ default 250 # milliseconds
+ help
+ On runtime suspend, as we suspend the device, we have to revoke
+ userspace GGTT mmaps and force userspace to take a pagefault on
+ their next access. The revocation and subsequent recreation of
+ the GGTT mmap can be very slow and so we impose a small hysteresis
+ that complements the runtime-pm autosuspend and provides a lower
+ floor on the autosuspend delay.
+
+ May be 0 to disable the extra delay and solely use the device level
+ runtime pm autosuspend delay tunable.
+
+config DRM_I915_SPIN_REQUEST
+ int "Busywait for request completion (us)"
+ default 5 # microseconds
+ help
+ Before sleeping waiting for a request (GPU operation) to complete,
+ we may spend some time polling for its completion. As the IRQ may
+ take a non-negligible time to setup, we do a short spin first to
+ check if the request will complete in the time it would have taken
+ us to enable the interrupt.
+
+ May be 0 to disable the initial spin. In practice, we estimate
+ the cost of enabling the interrupt (if currently disabled) to be
+ a few microseconds.
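
[Editor's note: DRM_I915_SPIN_REQUEST above describes a classic optimistic-spin pattern: poll briefly for completion before paying the cost of arming an interrupt and sleeping. A minimal sketch of that pattern, assuming <linux/ktime.h>, <asm/processor.h>, and a caller-supplied done() predicate; illustrative only, not i915's actual request-wait path:

        /*
         * Busy-poll for up to @spin_us before falling back to the
         * interrupt-driven sleep; only worthwhile while @spin_us is on
         * the order of the IRQ setup cost itself.
         */
        static bool spin_for_completion(bool (*done)(void *), void *arg,
                                        unsigned int spin_us)
        {
                ktime_t end = ktime_add_us(ktime_get(), spin_us);

                do {
                        if (done(arg))
                                return true;    /* no interrupt needed */
                        cpu_relax();
                } while (ktime_before(ktime_get(), end));

                return false;   /* caller now enables the IRQ and sleeps */
        }
]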
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fbcb0904f4a8..91355c2ea8a5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,52 +35,93 @@ subdir-ccflags-y += \
# Extra header tests
include $(src)/Makefile.header-test
+subdir-ccflags-y += -I$(src)
+
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
- i915_memcpy.o \
- i915_mm.o \
i915_params.o \
i915_pci.o \
- i915_reset.o \
+ i915_scatterlist.o \
i915_suspend.o \
- i915_sw_fence.o \
- i915_syncmap.o \
i915_sysfs.o \
- i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o \
- intel_workarounds.o
+ intel_sideband.o \
+ intel_uncore.o \
+ intel_wakeref.o
+
+# core library code
+i915-y += \
+ i915_memcpy.o \
+ i915_mm.o \
+ i915_sw_fence.o \
+ i915_syncmap.o \
+ i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
-# GEM code
+# "Graphics Technology" (aka we talk to the gpu)
+obj-y += gt/
+gt-y += \
+ gt/intel_breadcrumbs.o \
+ gt/intel_context.o \
+ gt/intel_engine_cs.o \
+ gt/intel_engine_pm.o \
+ gt/intel_gt_pm.o \
+ gt/intel_hangcheck.o \
+ gt/intel_lrc.o \
+ gt/intel_reset.o \
+ gt/intel_ringbuffer.o \
+ gt/intel_mocs.o \
+ gt/intel_sseu.o \
+ gt/intel_workarounds.o
+gt-$(CONFIG_DRM_I915_SELFTEST) += \
+ gt/mock_engine.o
+i915-y += $(gt-y)
+
+# GEM (Graphics Execution Management) code
+obj-y += gem/
+gem-y += \
+ gem/i915_gem_busy.o \
+ gem/i915_gem_clflush.o \
+ gem/i915_gem_client_blt.o \
+ gem/i915_gem_context.o \
+ gem/i915_gem_dmabuf.o \
+ gem/i915_gem_domain.o \
+ gem/i915_gem_execbuffer.o \
+ gem/i915_gem_fence.o \
+ gem/i915_gem_internal.o \
+ gem/i915_gem_object.o \
+ gem/i915_gem_object_blt.o \
+ gem/i915_gem_mman.o \
+ gem/i915_gem_pages.o \
+ gem/i915_gem_phys.o \
+ gem/i915_gem_pm.o \
+ gem/i915_gem_shmem.o \
+ gem/i915_gem_shrinker.o \
+ gem/i915_gem_stolen.o \
+ gem/i915_gem_throttle.o \
+ gem/i915_gem_tiling.o \
+ gem/i915_gem_userptr.o \
+ gem/i915_gem_wait.o \
+ gem/i915_gemfs.o
i915-y += \
+ $(gem-y) \
i915_active.o \
i915_cmd_parser.o \
i915_gem_batch_pool.o \
- i915_gem_clflush.o \
- i915_gem_context.o \
- i915_gem_dmabuf.o \
i915_gem_evict.o \
- i915_gem_execbuffer.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
- i915_gem_internal.o \
i915_gem.o \
- i915_gem_object.o \
i915_gem_render_state.o \
- i915_gem_shrinker.o \
- i915_gem_stolen.o \
- i915_gem_tiling.o \
- i915_gem_userptr.o \
- i915_gemfs.o \
i915_globals.o \
i915_query.o \
i915_request.o \
@@ -88,14 +129,6 @@ i915-y += \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
- intel_breadcrumbs.o \
- intel_context.o \
- intel_engine_cs.o \
- intel_hangcheck.o \
- intel_lrc.o \
- intel_mocs.o \
- intel_ringbuffer.o \
- intel_uncore.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -117,62 +150,71 @@ i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen9.o
# modesetting core code
-i915-y += intel_audio.o \
- intel_atomic.o \
- intel_atomic_plane.o \
- intel_bios.o \
- intel_cdclk.o \
- intel_color.o \
- intel_combo_phy.o \
- intel_connector.o \
- intel_display.o \
- intel_dpio_phy.o \
- intel_dpll_mgr.o \
- intel_fbc.o \
- intel_fifo_underrun.o \
- intel_frontbuffer.o \
- intel_hdcp.o \
- intel_hotplug.o \
- intel_overlay.o \
- intel_psr.o \
- intel_quirks.o \
- intel_sideband.o \
- intel_sprite.o
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
+obj-y += display/
+i915-y += \
+ display/intel_atomic.o \
+ display/intel_atomic_plane.o \
+ display/intel_audio.o \
+ display/intel_bios.o \
+ display/intel_bw.o \
+ display/intel_cdclk.o \
+ display/intel_color.o \
+ display/intel_combo_phy.o \
+ display/intel_connector.o \
+ display/intel_display.o \
+ display/intel_display_power.o \
+ display/intel_dpio_phy.o \
+ display/intel_dpll_mgr.o \
+ display/intel_fbc.o \
+ display/intel_fifo_underrun.o \
+ display/intel_frontbuffer.o \
+ display/intel_hdcp.o \
+ display/intel_hotplug.o \
+ display/intel_lpe_audio.o \
+ display/intel_overlay.o \
+ display/intel_psr.o \
+ display/intel_quirks.o \
+ display/intel_sprite.o
+i915-$(CONFIG_ACPI) += \
+ display/intel_acpi.o \
+ display/intel_opregion.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
+ display/intel_fbdev.o
# modesetting output/encoder code
-i915-y += dvo_ch7017.o \
- dvo_ch7xxx.o \
- dvo_ivch.o \
- dvo_ns2501.o \
- dvo_sil164.o \
- dvo_tfp410.o \
- icl_dsi.o \
- intel_crt.o \
- intel_ddi.o \
- intel_dp_aux_backlight.o \
- intel_dp_link_training.o \
- intel_dp_mst.o \
- intel_dp.o \
- intel_dsi.o \
- intel_dsi_dcs_backlight.o \
- intel_dsi_vbt.o \
- intel_dvo.o \
- intel_hdmi.o \
- intel_i2c.o \
- intel_lspcon.o \
- intel_lvds.o \
- intel_panel.o \
- intel_sdvo.o \
- intel_tv.o \
- vlv_dsi.o \
- vlv_dsi_pll.o \
- intel_vdsc.o
+i915-y += \
+ display/dvo_ch7017.o \
+ display/dvo_ch7xxx.o \
+ display/dvo_ivch.o \
+ display/dvo_ns2501.o \
+ display/dvo_sil164.o \
+ display/dvo_tfp410.o \
+ display/icl_dsi.o \
+ display/intel_crt.o \
+ display/intel_ddi.o \
+ display/intel_dp.o \
+ display/intel_dp_aux_backlight.o \
+ display/intel_dp_link_training.o \
+ display/intel_dp_mst.o \
+ display/intel_dsi.o \
+ display/intel_dsi_dcs_backlight.o \
+ display/intel_dsi_vbt.o \
+ display/intel_dvo.o \
+ display/intel_gmbus.o \
+ display/intel_hdmi.o \
+ display/intel_lspcon.o \
+ display/intel_lvds.o \
+ display/intel_panel.o \
+ display/intel_sdvo.o \
+ display/intel_tv.o \
+ display/intel_vdsc.o \
+ display/vlv_dsi.o \
+ display/vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
+ gem/selftests/igt_gem_utils.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
@@ -205,8 +247,5 @@ i915-y += intel_gvt.o
include $(src)/gvt/Makefile
endif
-# LPE Audio for VLV and CHT
-i915-y += intel_lpe_audio.o
-
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
index 639b596a06a9..7cde0ec34615 100644
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -4,34 +4,19 @@
# Test the headers are compilable as standalone units
header-test-$(CONFIG_DRM_I915_WERROR) := \
i915_active_types.h \
- i915_gem_context_types.h \
+ i915_debugfs.h \
+ i915_drv.h \
+ i915_irq.h \
+ i915_params.h \
i915_priolist_types.h \
+ i915_reg.h \
i915_scheduler_types.h \
i915_timeline_types.h \
- intel_atomic_plane.h \
- intel_audio.h \
- intel_cdclk.h \
- intel_color.h \
- intel_connector.h \
- intel_context_types.h \
- intel_crt.h \
+ i915_utils.h \
intel_csr.h \
- intel_ddi.h \
- intel_dp.h \
- intel_dvo.h \
- intel_engine_types.h \
- intel_fbc.h \
- intel_fbdev.h \
- intel_frontbuffer.h \
- intel_hdcp.h \
- intel_hdmi.h \
- intel_lspcon.h \
- intel_lvds.h \
- intel_panel.h \
- intel_pipe_crc.h \
+ intel_drv.h \
intel_pm.h \
- intel_psr.h \
- intel_sdvo.h \
- intel_sprite.h \
- intel_tv.h \
- intel_workarounds_types.h
+ intel_runtime_pm.h \
+ intel_sideband.h \
+ intel_uncore.h \
+ intel_wakeref.h
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
new file mode 100644
index 000000000000..1c75b5c9790c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/Makefile
@@ -0,0 +1,2 @@
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test
new file mode 100644
index 000000000000..fc7d4e5bd2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
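
[Editor's note: concretely, the rule above generates, for every header in display/ except intel_vbt_defs.h, a one-line translation unit that is then compiled with the driver's warning flags. For example, the generated header_test_intel_acpi.c contains only:

        #include "intel_acpi.h"

A header that does not compile standalone (missing forward declarations or includes) then fails the build under CONFIG_DRM_I915_WERROR.]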
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index caac9942e1e3..602380fe74f3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,7 +25,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
#define CH7017_FLICKER_FILTER 0x01
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 397ac5233726..e070bebee7b5 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 24278cc49090..09dba35f3ffa 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,7 +29,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
/*
* register definitions for the i82807aa.
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index c584e01dc8dc..c83a5d88d62b 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,9 +26,10 @@
*
*/
-#include "dvo.h"
-#include "i915_reg.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 4ae5d8fd9ff0..04698eaeb632 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
#define SIL164_DID 0x0006
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index d603bc2f2506..623114ee73cd 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,7 +25,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
#define TFP410_VID 0x014C
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 9d962ea1e635..74448e6bf749 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -28,6 +28,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
+#include "intel_atomic.h"
+#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dsi.h"
@@ -363,30 +365,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 tmp;
- u32 lane_mask;
-
- switch (intel_dsi->lane_count) {
- case 1:
- lane_mask = PWR_DOWN_LN_3_1_0;
- break;
- case 2:
- lane_mask = PWR_DOWN_LN_3_1;
- break;
- case 3:
- lane_mask = PWR_DOWN_LN_3;
- break;
- case 4:
- default:
- lane_mask = PWR_UP_ALL_LANES;
- break;
- }
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_CL_DW10(port));
- tmp &= ~PWR_DOWN_LN_MASK;
- I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_combo_phy_power_up_lanes(dev_priv, port, true,
+ intel_dsi->lane_count, false);
}
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
@@ -1193,17 +1175,51 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_get_timings(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hdisplay *= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ adjusted_mode->crtc_hdisplay -=
+ intel_dsi->pixel_overlap;
+ adjusted_mode->crtc_htotal *= 2;
+ }
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hsync_start *= 2;
+ adjusted_mode->crtc_hsync_end *= 2;
+ }
+ }
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+}
+
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->base.adjusted_mode.crtc_clock *= 2;
+
+ gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
}
static int gen11_dsi_compute_config(struct intel_encoder *encoder,
@@ -1219,6 +1235,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
@@ -1363,6 +1380,113 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /*
+ * prepare cnt in escape clocks
+ * this field represents a hexadecimal value with a precision
+ * of 1.2 – i.e. the most significant bit is the integer
+ * and the least significant 2 bits are fraction bits.
+ * so, the field can represent a range of 0.25 to 1.75
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
+
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail cnt in escape clocks */
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
+
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
+
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
+
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
+
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+
+ intel_dsi_log_params(intel_dsi);
+}
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1455,6 +1579,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
+ icl_dphy_param_init(intel_dsi);
return;
err:
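
[Editor's note: the 1.2 fixed-point comment in icl_dphy_param_init() above is easier to follow with numbers: the field stores the prepare time in units of a quarter tlpx (one integer bit, two fraction bits), which is why the code scales by 4 before dividing. A worked example with assumed values, not taken from any real VBT:

        /* Assume tlpx_ns = 85 and ths_prepare_ns = 70:
         *
         *   prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns)
         *               = DIV_ROUND_UP(280, 85) = 4
         *
         * In 1.2 fixed point, 4 encodes 4 * 0.25 = 1.00 tlpx; the field
         * saturates at ICL_PREPARE_CNT_MAX (0x7) = 1.75 tlpx, matching
         * the 0.25 to 1.75 range quoted in the comment.
         */
]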
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 9d142d038a7d..3456d33feb46 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -4,9 +4,12 @@
*
* _DSM related code stolen from nouveau_acpi.c.
*/
+
#include <linux/pci.h>
#include <linux/acpi.h>
+
#include "i915_drv.h"
+#include "intel_acpi.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
new file mode 100644
index 000000000000..1c576b3fb712
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ACPI_H__
+#define __INTEL_ACPI_H__
+
+#ifdef CONFIG_ACPI
+void intel_register_dsm_handler(void);
+void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
+#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 8c8fae32ec50..90ca11a4ae88 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -34,6 +34,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"
@@ -104,13 +105,25 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return -EINVAL;
}
+static bool blob_equal(const struct drm_property_blob *a,
+ const struct drm_property_blob *b)
+{
+ if (a && b)
+ return a->length == b->length &&
+ !memcmp(a->data, b->data, a->length);
+
+ return !a == !b;
+}
+
int intel_digital_connector_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, conn);
struct intel_digital_connector_state *new_conn_state =
to_intel_digital_connector_state(new_state);
struct drm_connector_state *old_state =
- drm_atomic_get_old_connector_state(new_state->state, conn);
+ drm_atomic_get_old_connector_state(state, conn);
struct intel_digital_connector_state *old_conn_state =
to_intel_digital_connector_state(old_state);
struct drm_crtc_state *crtc_state;
@@ -120,7 +133,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (!new_state->crtc)
return 0;
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
/*
* These properties are handled by fastset, and might not end
@@ -131,7 +144,9 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
- new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
+ new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
+ !blob_equal(new_conn_state->base.hdr_output_metadata,
+ old_conn_state->base.hdr_output_metadata))
crtc_state->mode_changed = true;
return 0;
@@ -411,3 +426,15 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
}
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+
+ return to_intel_crtc_state(crtc_state);
+}
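
[Editor's note: one subtlety in the blob_equal() helper added above: when either pointer is NULL the function returns "!a == !b", which compares the pointers as booleans, so two missing blobs compare equal while a present/absent pair does not:

        /* blob_equal() NULL handling, per the hunk above:
         *   a == NULL, b == NULL  ->  !a == !b  ->  true  (both absent)
         *   a != NULL, b == NULL  ->  !a == !b  ->  false (one absent)
         *   a != NULL, b != NULL  ->  length and memcmp() comparison
         */
]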
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
new file mode 100644
index 000000000000..58065d3161a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_H__
+#define __INTEL_ATOMIC_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_device;
+struct drm_i915_private;
+struct drm_property;
+struct intel_crtc;
+struct intel_crtc_state;
+
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 *val);
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 val);
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state);
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector);
+
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *state);
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc);
+
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d11681d71add..30bd4e76fff9 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -114,6 +114,29 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ cpp = fb->format->cpp[0];
+
+ /*
+ * Based on HSD#:1408715493
+ * NV12 cpp == 4, P010 cpp == 8
+ *
+ * FIXME what is the logic behind this?
+ */
+ if (fb->format->is_yuv && fb->format->num_planes > 1)
+ cpp *= 4;
+
+ return cpp * crtc_state->pixel_rate;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -125,6 +148,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
+ new_crtc_state->data_rate[plane->id] = 0;
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -149,6 +173,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state);
+
return intel_plane_atomic_calc_changes(old_crtc_state,
&new_crtc_state->base,
old_plane_state,
@@ -326,48 +353,3 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
};
-
-/**
- * intel_plane_atomic_get_property - fetch plane property value
- * @plane: plane to fetch property for
- * @state: state containing the property value
- * @property: property to look up
- * @val: pointer to write property value into
- *
- * The DRM core does not store shadow copies of properties for
- * atomic-capable drivers. This entrypoint is used to fetch
- * the current value of a driver-specific plane property.
- */
-int
-intel_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- u64 *val)
-{
- DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
- return -EINVAL;
-}
-
-/**
- * intel_plane_atomic_set_property - set plane property value
- * @plane: plane to set property for
- * @state: state to update property value in
- * @property: property to set
- * @val: value to set property to
- *
- * Writes the specified property value for a plane into the provided atomic
- * state object.
- *
- * Returns 0 on success, -EINVAL on unrecognized properties
- */
-int
-intel_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- u64 val)
-{
- DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
- return -EINVAL;
-}
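
[Editor's note: a worked example of the new intel_plane_data_rate(); the values follow directly from the code and its HSD note above:

        /* data_rate = cpp * crtc_state->pixel_rate, with the multi-plane
         * YUV quirk applied first:
         *
         *   NV12:     cpp[0] = 1, is_yuv && num_planes > 1 -> cpp = 4
         *   P010:     cpp[0] = 2, is_yuv && num_planes > 1 -> cpp = 8
         *   XRGB8888: cpp[0] = 4, single RGB plane         -> cpp = 4
         *
         * matching the "NV12 cpp == 4, P010 cpp == 8" comment, and an
         * invisible plane contributes a data rate of 0.
         */
]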
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 14678620440f..1437a8797e10 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -6,7 +6,11 @@
#ifndef __INTEL_ATOMIC_PLANE_H__
#define __INTEL_ATOMIC_PLANE_H__
+#include <linux/types.h>
+
+struct drm_crtc_state;
struct drm_plane;
+struct drm_property;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -15,6 +19,8 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
@@ -36,5 +42,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct drm_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index bca4cc025d3d..840daff12246 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -26,11 +26,11 @@
#include <drm/drm_edid.h>
#include <drm/i915_component.h>
-#include <drm/intel_lpe_audio.h>
#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_drv.h"
+#include "intel_lpe_audio.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
@@ -319,9 +319,8 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
const struct dp_aud_n_m *nm;
int rate;
u32 tmp;
@@ -333,7 +332,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
else
DRM_DEBUG_KMS("using automatic Maud, Naud\n");
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -345,9 +344,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -358,7 +357,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -367,15 +366,14 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
int n, rate;
u32 tmp;
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -392,16 +390,16 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
DRM_DEBUG_KMS("using automatic N\n");
}
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -419,28 +417,28 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
+ transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
/* Disable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
/* Invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~AUDIO_ELD_VALID(pipe);
- tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
@@ -451,22 +449,21 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const u8 *eld = connector->eld;
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
- pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
+ transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
/* Enable audio presence detect, invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_OUTPUT_ENABLE(pipe);
- tmp &= ~AUDIO_ELD_VALID(pipe);
+ tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/*
@@ -477,18 +474,18 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
*/
/* Reset ELD write address */
- tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+ tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+ I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
+ I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_ELD_VALID(pipe);
+ tmp |= AUDIO_ELD_VALID(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
@@ -644,8 +641,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
+ /* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- return;
+ DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index a3657c7a7ba2..a3657c7a7ba2 100644
--- a/drivers/gpu/drm/i915/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 1dc8d03ff127..c4710889cb32 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -27,6 +27,9 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+
+#include "display/intel_gmbus.h"
+
#include "i915_drv.h"
#define _INTEL_BIOS_PRIVATE
@@ -74,13 +77,13 @@ static u32 get_blocksize(const void *block_data)
}
static const void *
-find_section(const void *_bdb, int section_id)
+find_section(const void *_bdb, enum bdb_block_id section_id)
{
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
u32 total, current_size;
- u8 current_id;
+ enum bdb_block_id current_id;
/* skip to first section */
index += bdb->header_size;
@@ -300,7 +303,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
- const struct bdb_lfp_backlight_data_entry *entry;
+ const struct lfp_backlight_data_entry *entry;
int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
@@ -325,7 +328,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (bdb->version >= 191 &&
get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
- const struct bdb_lfp_backlight_control_method *method;
+ const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
@@ -349,7 +352,7 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
- const struct lvds_dvo_timing *dvo_timing;
+ const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
int index;
@@ -369,15 +372,15 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
index = sdvo_lvds_options->panel_type;
}
- dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
- if (!dvo_timing)
+ dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dtds)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+ fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
@@ -1237,27 +1240,36 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
+{
+ const struct ddi_vbt_port_info *info;
+ enum port port;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ info = &i915->vbt.ddi_port_info[port];
+
+ if (info->child && ddc_pin == info->alternate_ddc_pin)
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_ddc_pin)
return;
- for (p = PORT_A; p < I915_MAX_PORTS; p++) {
- struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
-
- if (p == port || !i->present ||
- info->alternate_ddc_pin != i->alternate_ddc_pin)
- continue;
-
+ p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
+ if (p != PORT_NONE) {
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
- port_name(p), i->alternate_ddc_pin,
- port_name(port), port_name(p));
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(port));
/*
* If we have multiple ports supposedly sharing the
@@ -1265,36 +1277,45 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 * port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
- * Due to parsing the ports in child device order,
- * a later device will always clobber an earlier one.
+	 * Give priority to child device order: first come,
+	 * first served.
*/
- i->supports_dvi = false;
- i->supports_hdmi = false;
- i->alternate_ddc_pin = 0;
+ info->supports_dvi = false;
+ info->supports_hdmi = false;
+ info->alternate_ddc_pin = 0;
}
}
+static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
+{
+ const struct ddi_vbt_port_info *info;
+ enum port port;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ info = &i915->vbt.ddi_port_info[port];
+
+ if (info->child && aux_ch == info->alternate_aux_channel)
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_aux_channel)
return;
- for (p = PORT_A; p < I915_MAX_PORTS; p++) {
- struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
-
- if (p == port || !i->present ||
- info->alternate_aux_channel != i->alternate_aux_channel)
- continue;
-
+ p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
+ if (p != PORT_NONE) {
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
- port_name(p), i->alternate_aux_channel,
- port_name(port), port_name(p));
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(port));
/*
 * If we have multiple ports supposedly sharing the
@@ -1302,11 +1323,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Due to parsing the ports in child device order,
- * a later device will always clobber an earlier one.
+	 * Give priority to child device order: first come,
+	 * first served.
*/
- i->supports_dp = false;
- i->alternate_aux_channel = 0;
+ info->supports_dp = false;
+ info->alternate_aux_channel = 0;
}
}
@@ -1327,12 +1348,21 @@ static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
};
+static const u8 mcc_ddc_pin_map[] = {
+ [MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_ICP(dev_priv)) {
+ if (HAS_PCH_MCC(dev_priv)) {
+ ddc_pin_map = mcc_ddc_pin_map;
+ n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
+ } else if (HAS_PCH_ICP(dev_priv)) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
@@ -1395,14 +1425,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info = &dev_priv->vbt.ddi_port_info[port];
- if (info->present) {
+ if (info->child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
return;
}
- info->present = true;
-
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@@ -1427,8 +1455,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
- port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+ DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ HAS_LSPCON(dev_priv) && child->lspcon,
info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
@@ -1530,6 +1559,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
port_name(port), info->dp_max_link_rate);
}
+
+ info->child = child;
}
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
@@ -2149,106 +2180,39 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if HPD should be inverted for %port.
*/
bool
-intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct child_device_config *child;
- int i;
+ const struct child_device_config *child =
+ i915->vbt.ddi_port_info[port].child;
- if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
+ if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
return false;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- if (!child->hpd_invert)
- continue;
-
- switch (child->dvo_port) {
- case DVO_PORT_DPA:
- case DVO_PORT_HDMIA:
- if (port == PORT_A)
- return true;
- break;
- case DVO_PORT_DPB:
- case DVO_PORT_HDMIB:
- if (port == PORT_B)
- return true;
- break;
- case DVO_PORT_DPC:
- case DVO_PORT_HDMIC:
- if (port == PORT_C)
- return true;
- break;
- default:
- break;
- }
- }
-
- return false;
+ return child && child->hpd_invert;
}
/**
* intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if LSPCON is present on this port
*/
bool
-intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
- enum port port)
+intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+ enum port port)
{
- const struct child_device_config *child;
- int i;
-
- if (!HAS_LSPCON(dev_priv))
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- if (!child->lspcon)
- continue;
+ const struct child_device_config *child =
+ i915->vbt.ddi_port_info[port].child;
- switch (child->dvo_port) {
- case DVO_PORT_DPA:
- case DVO_PORT_HDMIA:
- if (port == PORT_A)
- return true;
- break;
- case DVO_PORT_DPB:
- case DVO_PORT_HDMIB:
- if (port == PORT_B)
- return true;
- break;
- case DVO_PORT_DPC:
- case DVO_PORT_HDMIC:
- if (port == PORT_C)
- return true;
- break;
- case DVO_PORT_DPD:
- case DVO_PORT_HDMID:
- if (port == PORT_D)
- return true;
- break;
- case DVO_PORT_DPF:
- case DVO_PORT_HDMIF:
- if (port == PORT_F)
- return true;
- break;
- default:
- break;
- }
- }
-
- return false;
+ return HAS_LSPCON(i915) && child && child->lspcon;
}
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 7e3545f65257..4e42cfaf61a7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -30,6 +30,12 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
INTEL_BACKLIGHT_LPSS,
@@ -220,4 +226,19 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
+void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
+ enum port port);
+bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+ enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
new file mode 100644
index 000000000000..753ac3165061
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "intel_bw.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
+
+/* Parameters for Qclk Geyserville (QGV) */
+struct intel_qgv_point {
+ u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
+};
+
+struct intel_qgv_info {
+ struct intel_qgv_point points[3];
+ u8 num_points;
+ u8 num_channels;
+ u8 t_bl;
+ enum intel_dram_type dram_type;
+};
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_info *qi)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+		qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+
+ qi->num_channels = (val & 0xf0) >> 4;
+ qi->num_points = (val & 0xf00) >> 8;
+
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+
+ return 0;
+}
+
+static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ u32 val = 0, val2;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
+ if (ret)
+ return ret;
+
+ sp->dclk = val & 0xffff;
+ sp->t_rp = (val & 0xff0000) >> 16;
+ sp->t_rcd = (val & 0xff000000) >> 24;
+
+ sp->t_rdpre = val2 & 0xff;
+ sp->t_ras = (val2 & 0xff00) >> 8;
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
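
The bitfield unpacking above is easy to sanity-check in isolation. A standalone sketch with made-up pcode reply words (illustrative values, not real hardware data):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed pcode replies; the field layout mirrors the hunk above. */
	uint32_t val = 0x12100048, val2 = 0x00002210;
	unsigned int dclk = val & 0xffff;
	unsigned int t_rp = (val >> 16) & 0xff, t_rcd = (val >> 24) & 0xff;
	unsigned int t_rdpre = val2 & 0xff, t_ras = (val2 >> 8) & 0xff;

	printf("dclk=%u tRP=%u tRDPRE=%u tRAS=%u tRCD=%u tRC=%u\n",
	       dclk, t_rp, t_rdpre, t_ras, t_rcd, t_rp + t_ras);
	return 0;
}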
+
+static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+ struct intel_qgv_info *qi)
+{
+ int i, ret;
+
+ ret = icl_pcode_read_mem_global_info(dev_priv, qi);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+ qi->num_points = ARRAY_SIZE(qi->points);
+
+ for (i = 0; i < qi->num_points; i++) {
+ struct intel_qgv_point *sp = &qi->points[i];
+
+ ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
+ }
+
+ return 0;
+}
+
+static int icl_calc_bw(int dclk, int num, int den)
+{
+ /* multiples of 16.666MHz (100/6) */
+ return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
+}
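
Since dclk is reported in multiples of 16.666 MHz, icl_calc_bw(dclk, num, den) appears to yield MB/s for num bytes transferred every den clocks. A standalone check of the arithmetic, assuming a dclk of 72 units (72 * 16.666 MHz = 1200 MHz):

#include <stdio.h>

/* Userspace stand-in for the kernel's DIV_ROUND_CLOSEST(). */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

static int icl_calc_bw(int dclk, int num, int den)
{
	return div_round_closest(num * dclk * 100, den * 6);
}

int main(void)
{
	/* Assumed dclk of 72 units: 16 B/clk * 1200 MHz = 19200 MB/s. */
	printf("%d MB/s\n", icl_calc_bw(72, 16, 1));
	return 0;
}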
+
+static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
+{
+ u16 dclk = 0;
+ int i;
+
+ for (i = 0; i < qi->num_points; i++)
+ dclk = max(dclk, qi->points[i].dclk);
+
+ return dclk;
+}
+
+struct intel_sa_info {
+ u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+};
+
+static const struct intel_sa_info icl_sa_info = {
+ .deburst = 8,
+ .mpagesize = 16,
+ .deprogbwlimit = 25, /* GB/s */
+ .displayrtids = 128,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+{
+ struct intel_qgv_info qi = {};
+ const struct intel_sa_info *sa = &icl_sa_info;
+ bool is_y_tile = true; /* assume y tile may be used */
+ int num_channels;
+ int deinterleave;
+ int ipqdepth, ipqdepthpch;
+ int dclk_max;
+ int maxdebw;
+ int i, ret;
+
+ ret = icl_get_qgv_points(dev_priv, &qi);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+ num_channels = qi.num_channels;
+
+ deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+ dclk_max = icl_sagv_max_dclk(&qi);
+
+ ipqdepthpch = 16;
+
+ maxdebw = min(sa->deprogbwlimit * 1000,
+ icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
+ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+
+ for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ int clpchgroup;
+ int j;
+
+ clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
+ bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+
+ for (j = 0; j < qi.num_points; j++) {
+ const struct intel_qgv_point *sp = &qi.points[j];
+ int ct, bw;
+
+ /*
+ * Max row cycle time
+ *
+ * FIXME what is the logic behind the
+ * assumed burst length?
+ */
+ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+ (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+ bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
+
+ bi->deratedbw[j] = min(maxdebw,
+ bw * 9 / 10); /* 90% */
+
+ DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
+ }
+
+ if (bi->num_planes == 1)
+ break;
+ }
+
+ return 0;
+}
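
The interplay of clpchgroup and num_planes in the loop above is easier to see with concrete numbers. A standalone sketch assuming a 2-channel, Y-tile system with the ICL SA parameters (deburst 8, displayrtids 128, ipqdepthpch 16):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Assumed configuration; mirrors the derivation in icl_get_bw_info(). */
	const int deburst = 8, displayrtids = 128, ipqdepthpch = 16;
	const int num_channels = 2;
	const int deinterleave = DIV_ROUND_UP(num_channels, 4); /* Y-tile */
	const int ipqdepth = MIN(ipqdepthpch, displayrtids / num_channels);
	int i;

	for (i = 0; i < 6; i++) {
		int clpchgroup = (deburst * deinterleave / num_channels) << i;
		int num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		/* Prints BW0: 4 planes, BW1: 2 planes, BW2: 1 plane. */
		printf("BW%d: clpchgroup=%2d num_planes=%d\n",
		       i, clpchgroup, num_planes);
		if (num_planes == 1)
			break;
	}
	return 0;
}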
+
+static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
+{
+ int i;
+
+ /* Did we initialize the bw limits successfully? */
+ if (dev_priv->max_bw[0].num_planes == 0)
+ return UINT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[i];
+
+ if (num_planes >= bi->num_planes)
+ return bi->deratedbw[qgv_point];
+ }
+
+ return 0;
+}
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv);
+}
+
+static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
+ int num_planes)
+{
+ if (IS_GEN(dev_priv, 11))
+ /*
+ * FIXME with SAGV disabled maybe we can assume
+ * point 1 will always be used? Seems to match
+ * the behaviour observed in the wild.
+ */
+ return min3(icl_max_bw(dev_priv, num_planes, 0),
+ icl_max_bw(dev_priv, num_planes, 1),
+ icl_max_bw(dev_priv, num_planes, 2));
+ else
+ return UINT_MAX;
+}
+
+static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+	 * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+		 * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+ }
+
+ return data_rate;
+}
+
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_bw_crtc_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_bw_crtc_num_active_planes(crtc_state);
+
+ DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int num_active_planes = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ num_active_planes += bw_state->num_active_planes[pipe];
+
+ return num_active_planes;
+}
+
+static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int data_rate = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ data_rate += bw_state->data_rate[pipe];
+
+ return data_rate;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_bw_state *bw_state = NULL;
+ unsigned int data_rate, max_data_rate;
+ unsigned int num_active_planes;
+ struct intel_crtc *crtc;
+ int i;
+
+ /* FIXME earlier gens need some checks too */
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ unsigned int old_data_rate =
+ intel_bw_crtc_data_rate(old_crtc_state);
+ unsigned int new_data_rate =
+ intel_bw_crtc_data_rate(new_crtc_state);
+ unsigned int old_active_planes =
+ intel_bw_crtc_num_active_planes(old_crtc_state);
+ unsigned int new_active_planes =
+ intel_bw_crtc_num_active_planes(new_crtc_state);
+
+ /*
+ * Avoid locking the bw state when
+ * nothing significant has changed.
+ */
+ if (old_data_rate == new_data_rate &&
+ old_active_planes == new_active_planes)
+ continue;
+
+ bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(bw_state))
+ return PTR_ERR(bw_state);
+
+ bw_state->data_rate[crtc->pipe] = new_data_rate;
+ bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+
+ DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+ }
+
+ if (!bw_state)
+ return 0;
+
+ data_rate = intel_bw_data_rate(dev_priv, bw_state);
+ num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+
+ max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+ if (data_rate > max_data_rate) {
+ DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+ data_rate, max_data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ return 0;
+}
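
Tying the pieces together: the per-plane rates from intel_plane_data_rate() are in kB/s, summed across pipes, then rounded up to MB/s before the comparison. A standalone sketch with assumed numbers (three 4k60 XRGB8888 planes against an illustrative 19200 MB/s derated limit):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed: three 4k60 XRGB8888 planes at 594000 kHz pixel rate. */
	unsigned int data_rate = 3 * 4 * 594000;	/* kB/s */
	unsigned int max_data_rate = 19200;		/* MB/s, illustrative */

	data_rate = DIV_ROUND_UP(data_rate, 1000);	/* kB/s -> MB/s */
	printf("need %u MB/s, limit %u MB/s -> %s\n",
	       data_rate, max_data_rate,
	       data_rate > max_data_rate ? "-EINVAL" : "ok");
	return 0;
}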
+
+static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+{
+ struct intel_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void intel_bw_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(state);
+}
+
+static const struct drm_private_state_funcs intel_bw_funcs = {
+ .atomic_duplicate_state = intel_bw_duplicate_state,
+ .atomic_destroy_state = intel_bw_destroy_state,
+};
+
+int intel_bw_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
+ &state->base, &intel_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
new file mode 100644
index 000000000000..e9d9c6d63bc3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BW_H__
+#define __INTEL_BW_H__
+
+#include <drm/drm_atomic.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+
+struct intel_bw_state {
+ struct drm_private_state base;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
+
+#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+
+static inline struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_private_state *bw_state;
+
+ bw_state = drm_atomic_get_private_obj_state(&state->base,
+ &dev_priv->bw_obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv);
+int intel_bw_init(struct drm_i915_private *dev_priv);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ae40a8679314..8993ab283562 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -23,6 +23,7 @@
#include "intel_cdclk.h"
#include "intel_drv.h"
+#include "intel_sideband.h"
/**
* DOC: CDCLK / RAWCLK
@@ -464,14 +465,18 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
{
u32 val;
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_state->vco);
- mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- mutex_unlock(&dev_priv->pcu_lock);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (IS_VALLEYVIEW(dev_priv))
cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -545,7 +550,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
+
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -555,9 +564,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
-
- mutex_lock(&dev_priv->sb_lock);
if (cdclk == 400000) {
u32 divider;
@@ -591,7 +597,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
intel_update_cdclk(dev_priv);
@@ -627,7 +636,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -637,7 +646,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
+
+ vlv_punit_put(dev_priv);
intel_update_cdclk(dev_priv);
@@ -716,10 +726,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("failed to inform pcode about cdclk change\n");
return;
@@ -768,10 +776,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -803,20 +809,14 @@ static int skl_calc_cdclk(int min_cdclk, int vco)
static u8 skl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- default:
- case 308571:
- case 337500:
- return 0;
- case 450000:
- case 432000:
- return 1;
- case 540000:
- return 2;
- case 617143:
- case 675000:
+ if (cdclk > 540000)
return 3;
- }
+ else if (cdclk > 450000)
+ return 2;
+ else if (cdclk > 337500)
+ return 1;
+ else
+ return 0;
}
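
The switch-to-threshold rewrites here and in the cnl/icl hunks below preserve the old tables for every frequency they listed; only unlisted frequencies change behaviour, mapping to their threshold bucket instead of the old default arm. A standalone check against the old skl table:

#include <stdio.h>

static int skl_calc_voltage_level(int cdclk)
{
	if (cdclk > 540000)
		return 3;
	else if (cdclk > 450000)
		return 2;
	else if (cdclk > 337500)
		return 1;
	else
		return 0;
}

int main(void)
{
	/* The frequencies from the old switch: expect 0 0 1 1 2 3 3. */
	static const int freqs[] = {
		308571, 337500, 432000, 450000, 540000, 617143, 675000,
	};
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%d kHz -> level %d\n", freqs[i],
		       skl_calc_voltage_level(freqs[i]));
	return 0;
}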
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
@@ -1010,12 +1010,10 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
*/
WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1079,10 +1077,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1379,12 +1375,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* requires us to wait up to 150usec, but that leads to timeouts;
* the 2ms used here is based on experiment.
*/
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000, 150, 2);
- mutex_unlock(&dev_priv->pcu_lock);
-
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
ret, cdclk);
@@ -1414,7 +1407,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
- mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -1424,8 +1416,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_state->voltage_level, 150, 2);
- mutex_unlock(&dev_priv->pcu_lock);
-
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
ret, cdclk);
@@ -1535,15 +1525,12 @@ static int cnl_calc_cdclk(int min_cdclk)
static u8 cnl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- default:
- case 168000:
- return 0;
- case 336000:
- return 1;
- case 528000:
+ if (cdclk > 336000)
return 2;
- }
+ else if (cdclk > 168000)
+ return 1;
+ else
+ return 0;
}
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
@@ -1648,12 +1635,10 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1692,10 +1677,8 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv, pipe);
/* inform PCU of the change */
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -1834,12 +1817,10 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
unsigned int vco = cdclk_state->vco;
int ret;
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1861,10 +1842,8 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -1877,21 +1856,12 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
static u8 icl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- case 50000:
- case 307200:
- case 312000:
- return 0;
- case 556800:
- case 552000:
- return 1;
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 652800:
- case 648000:
+ if (cdclk > 556800)
return 2;
- }
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -2277,6 +2247,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_VALLEYVIEW(dev_priv))
min_cdclk = max(320000, min_cdclk);
+ /*
+	 * On Geminilake, once the CDCLK gets as low as 79200 kHz,
+	 * the picture becomes unstable, even though the DSI PLL and
+	 * DE PLL values are correct.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_GEMINILAKE(dev_priv))
+ min_cdclk = max(158400, min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2286,29 +2265,28 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct drm_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
enum pipe pipe;
- memcpy(intel_state->min_cdclk, dev_priv->min_cdclk,
- sizeof(intel_state->min_cdclk));
+ memcpy(state->min_cdclk, dev_priv->min_cdclk,
+ sizeof(state->min_cdclk));
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
- intel_state->min_cdclk[i] = min_cdclk;
+ state->min_cdclk[i] = min_cdclk;
}
- min_cdclk = intel_state->cdclk.force_min_cdclk;
+ min_cdclk = state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
- min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
}
@@ -2350,10 +2328,9 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
return min_voltage_level;
}
-static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2362,28 +2339,25 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = vlv_calc_cdclk(dev_priv,
- intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2396,36 +2370,35 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
*/
cdclk = bdw_calc_cdclk(min_cdclk);
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+static int skl_dpll0_vco(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int vco, i;
- vco = intel_state->cdclk.logical.vco;
+ vco = state->cdclk.logical.vco;
if (!vco)
vco = dev_priv->skl_preferred_vco_freq;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->base.enable)
continue;
@@ -2450,16 +2423,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
return vco;
}
-static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- vco = skl_dpll0_vco(intel_state);
+ vco = skl_dpll0_vco(state);
/*
* FIXME should also account for plane ratio
@@ -2467,30 +2439,28 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
*/
cdclk = skl_calc_cdclk(min_cdclk, vco);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
+ if (!state->active_crtcs) {
+ cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2505,36 +2475,34 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
bxt_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
+ if (!state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
bxt_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2544,33 +2512,31 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = cnl_calc_cdclk(min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(intel_state));
+ cnl_compute_min_voltage_level(state));
- if (!intel_state->active_crtcs) {
- cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
cnl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- unsigned int ref = intel_state->cdclk.logical.ref;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ unsigned int ref = state->cdclk.logical.ref;
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2580,22 +2546,22 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = icl_calc_cdclk(min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
max(icl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(intel_state));
+ cnl_compute_min_voltage_level(state));
- if (!intel_state->active_crtcs) {
- cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
+ if (!state->active_crtcs) {
+ cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
icl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual = intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
@@ -2817,28 +2783,22 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- cnl_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- bxt_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- skl_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
dev_priv->display.set_cdclk = bdw_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- bdw_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
}
if (INTEL_GEN(dev_priv) >= 11)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..4d6f7f5f8930 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 9093daabc290..23a84dd7989f 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -41,6 +41,7 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
+
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -607,7 +608,7 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
-static void ivb_load_lut_10_max(struct intel_crtc *crtc)
+static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -640,7 +641,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -648,7 +649,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
ivb_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
@@ -663,7 +664,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -671,7 +672,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
@@ -763,10 +764,120 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts(crtc_state);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
+ }
+}
+
+/* ilk+ "12.4" interpolated format (high 10 bits) */
+static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 6) << 20 | (color->green >> 6) << 10 |
+ (color->blue >> 6);
+}
+
+/* ilk+ "12.4" interpolated format (low 6 bits) */
+static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 |
+ (color->blue & 0x3f) << 4;
+}
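
Each 16-bit channel value is split across two DWord writes: the top 10 bits go in the upper word, the bottom 6 in the lower word. A standalone sketch of the packing (the sample color is arbitrary):

#include <stdio.h>
#include <stdint.h>

struct color { uint16_t red, green, blue; };

/* High 10 bits of each channel, mirroring ilk_lut_12p4_udw() above. */
static uint32_t lut_12p4_udw(const struct color *c)
{
	return (uint32_t)(c->red >> 6) << 20 |
	       (uint32_t)(c->green >> 6) << 10 |
	       (c->blue >> 6);
}

/* Low 6 bits of each channel, mirroring ilk_lut_12p4_ldw() above. */
static uint32_t lut_12p4_ldw(const struct color *c)
{
	return (uint32_t)(c->red & 0x3f) << 24 |
	       (uint32_t)(c->green & 0x3f) << 14 |
	       (uint32_t)(c->blue & 0x3f) << 4;
}

int main(void)
{
	const struct color c = { 0xffff, 0x8000, 0x0041 };

	printf("udw=0x%08x ldw=0x%08x\n", lut_12p4_udw(&c), lut_12p4_ldw(&c));
	return 0;
}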
+
+static void
+icl_load_gcmax(const struct intel_crtc_state *crtc_state,
+ const struct drm_color_lut *color)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+	/* FIXME: LUT entries are 16 bit only, so we can program 0xFFFF max */
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+}
+
+static void
+icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ enum pipe pipe = crtc->pipe;
+ u32 i;
+
+ /*
+	 * Each entry in the multi-segment LUT corresponds to a superfine
+	 * segment step of 1/(8 * 128 * 256).
+ *
+ * Superfine segment has 9 entries, corresponding to values
+ * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ */
+ I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ const struct drm_color_lut *entry = &lut[i];
+
+ I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
}
+static void
+icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ const struct drm_color_lut *entry;
+ enum pipe pipe = crtc->pipe;
+ u32 i;
+
+ /*
+ * Program the fine segment (let's call it seg2)...
+ *
+ * The fine segment's step is 1/(128 * 256), i.e. 1/(128 * 256),
+ * 2/(128 * 256) ... 256/(128 * 256). So to program the fine segment
+ * of the LUT we need to pick every 8th entry in the LUT, and program
+ * 256 indexes.
+ *
+ * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
+ * with seg2[0] being unused by the hardware.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ for (i = 1; i < 257; i++) {
+ entry = &lut[i * 8];
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ }
+
+ /*
+ * Program the coarse segment (let's call it seg3)...
+ *
+ * The coarse segment starts from index 0 and its step is 1/256, i.e.
+ * 0, 1/256, 2/256 ... 256/256. As per the description of each entry
+ * in the LUT above, we need to pick every (8 * 128)th entry in the
+ * LUT and program 256 of those.
+ *
+ * The spec is not very clear about whether entries seg3[0] and
+ * seg3[1] are used, but we still need to program them to advance
+ * the index.
+ */
+ for (i = 0; i < 256; i++) {
+ entry = &lut[i * 8 * 128];
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ }
+
+ /* The last entry in the LUT is to be programmed in GCMAX */
+ entry = &lut[256 * 8 * 128];
+ icl_load_gcmax(crtc_state, entry);
+ ivb_load_lut_ext_max(crtc);
+}
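/*
 * A sketch of the index arithmetic used by the two functions above,
 * assuming the full multi-segment LUT (the final input 256 * 8 * 128
 * implies a blob of at least 262145 entries):
 *
 *   superfine: lut[0] .. lut[8]               (9 entries, stride 1)
 *   fine:      lut[1 * 8] .. lut[256 * 8]     (256 entries, stride 8)
 *   coarse:    lut[0] .. lut[255 * 8 * 128]   (256 entries, stride 1024)
 *   GCMAX:     lut[256 * 8 * 128]             (final entry)
 */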
+
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
@@ -775,65 +886,94 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
- if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_8BIT) {
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
- } else {
+ break;
+
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ icl_program_gamma_superfine_segment(crtc_state);
+ icl_program_gamma_multi_segment(crtc_state);
+ break;
+
+ default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
-static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
+static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 14) << 16 |
+ drm_color_lut_extract(color->blue, 14);
+}
+
+static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 14);
+}
+
+static void chv_load_cgm_degamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- cherryview_load_csc_matrix(crtc_state);
-
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
+ for (i = 0; i < lut_size; i++) {
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
+}
- if (degamma_lut) {
- const struct drm_color_lut *lut = degamma_lut->data;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 10) << 16 |
+ drm_color_lut_extract(color->blue, 10);
+}
- for (i = 0; i < lut_size; i++) {
- u32 word0, word1;
+static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10);
+}
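/*
 * A minimal sketch (example_* is hypothetical) of the CGM gamma
 * packing above; the 0x3ff values follow from drm_color_lut_extract()
 * reducing 16-bit input to 10 bits (0xffff -> 0x3ff):
 */
static void example_chv_cgm_gamma_pack(void)
{
	const struct drm_color_lut white = {
		.red = 0xffff, .green = 0xffff, .blue = 0xffff,
	};

	u32 ldw = chv_cgm_gamma_ldw(&white); /* 0x03ff03ff: green | blue */
	u32 udw = chv_cgm_gamma_udw(&white); /* 0x000003ff: red */

	(void)ldw; (void)udw;
}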
- /* Write LUT in U0.14 format. */
- word0 =
- (drm_color_lut_extract(lut[i].green, 14) << 16) |
- drm_color_lut_extract(lut[i].blue, 14);
- word1 = drm_color_lut_extract(lut[i].red, 14);
+static void chv_load_cgm_gamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
- }
+ for (i = 0; i < lut_size; i++) {
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
+}
- if (gamma_lut) {
- const struct drm_color_lut *lut = gamma_lut->data;
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
-
- for (i = 0; i < lut_size; i++) {
- u32 word0, word1;
+static void chv_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
- /* Write LUT in U0.10 format. */
- word0 =
- (drm_color_lut_extract(lut[i].green, 10) << 16) |
- drm_color_lut_extract(lut[i].blue, 10);
- word1 = drm_color_lut_extract(lut[i].red, 10);
+ cherryview_load_csc_matrix(crtc_state);
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
- }
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ i9xx_load_luts(crtc_state);
+ return;
}
+
+ if (degamma_lut)
+ chv_load_cgm_degamma(crtc, degamma_lut);
+
+ if (gamma_lut)
+ chv_load_cgm_gamma(crtc, gamma_lut);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -857,6 +997,14 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
return dev_priv->display.color_check(crtc_state);
}
+void intel_color_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (dev_priv->display.read_luts)
+ dev_priv->display.read_luts(crtc_state);
+}
+
static bool need_plane_update(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
@@ -937,8 +1085,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return 0;
/* C8 relies on its palette being stored in the legacy LUT */
- if (crtc_state->c8_planes)
+ if (crtc_state->c8_planes) {
+ DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
+ }
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
@@ -1187,7 +1337,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
- gamma_mode |= GAMMA_MODE_MODE_10BIT;
+ gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
return gamma_mode;
}
@@ -1232,7 +1382,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = cherryview_load_luts;
+ dev_priv->display.load_luts = chv_load_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index b8a3ce609587..057e8ac63555 100644
--- a/drivers/gpu/drm/i915/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -13,5 +13,6 @@ void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_get_config(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 2bf4359d7e41..841708da5a56 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "intel_combo_phy.h"
#include "intel_drv.h"
#define for_each_combo_port(__dev_priv, __port) \
@@ -147,7 +148,7 @@ static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
return ret;
}
-void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -167,7 +168,7 @@ void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
}
-void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -197,13 +198,69 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ if (port == PORT_A)
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+ IREFGEN, IREFGEN);
+
ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
-void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum port port, bool is_dsi,
+ int lane_count, bool lane_reversal)
+{
+ u8 lane_mask;
+ u32 val;
+
+ if (is_dsi) {
+ WARN_ON(lane_reversal);
+
+ switch (lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ /* fall-through */
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ } else {
+ switch (lane_count) {
+ case 1:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 :
+ PWR_DOWN_LN_3_2_1;
+ break;
+ case 2:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 :
+ PWR_DOWN_LN_3_2;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ /* fall-through */
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ }
+
+ val = I915_READ(ICL_PORT_CL_DW10(port));
+ val &= ~PWR_DOWN_LN_MASK;
+ val |= lane_mask << PWR_DOWN_LN_SHIFT;
+ I915_WRITE(ICL_PORT_CL_DW10(port), val);
+}
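/*
 * An editor's summary of the lane mask selection above, read straight
 * from the switch statements (lane numbering per the PWR_DOWN_LN_*
 * names):
 *
 *   DSI,     1 lane:             power down lanes 3, 1, 0
 *   DSI,     2 lanes:            power down lanes 3, 1
 *   DSI,     3 lanes:            power down lane  3
 *   non-DSI, 1 lane,  normal:    power down lanes 3, 2, 1
 *   non-DSI, 1 lane,  reversed:  power down lanes 2, 1, 0
 *   non-DSI, 2 lanes, normal:    power down lanes 3, 2
 *   non-DSI, 2 lanes, reversed:  power down lanes 1, 0
 *   4 lanes (or unexpected):     power up all lanes
 */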
+
+static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
enum port port;
@@ -222,6 +279,12 @@ void icl_combo_phys_init(struct drm_i915_private *dev_priv)
cnl_set_procmon_ref_values(dev_priv, port);
+ if (port == PORT_A) {
+ val = I915_READ(ICL_PORT_COMP_DW8(port));
+ val |= IREFGEN;
+ I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+ }
+
val = I915_READ(ICL_PORT_COMP_DW0(port));
val |= COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
@@ -232,7 +295,7 @@ void icl_combo_phys_init(struct drm_i915_private *dev_priv)
}
}
-void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
enum port port;
@@ -253,3 +316,19 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
}
}
+
+void intel_combo_phy_init(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_combo_phys_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_combo_phys_init(i915);
+}
+
+void intel_combo_phy_uninit(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_combo_phys_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_combo_phys_uninit(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
new file mode 100644
index 000000000000..e6e195a83b19
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COMBO_PHY_H__
+#define __INTEL_COMBO_PHY_H__
+
+#include <linux/types.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
+void intel_combo_phy_init(struct drm_i915_private *dev_priv);
+void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum port port, bool is_dsi,
+ int lane_count, bool lane_reversal);
+
+#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 073b6c3ab7cc..41310f8e5a2a 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -29,11 +29,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include "display/intel_panel.h"
+
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
-#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..93a7375c8196 100644
--- a/drivers/gpu/drm/i915/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index b665c370111b..3fcf2f84bcce 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -39,6 +39,9 @@
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
+#include "intel_hotplug.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -640,6 +643,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
@@ -660,9 +664,9 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
pipeconf_reg = PIPECONF(pipe);
pipe_dsl_reg = PIPEDSL(pipe);
- save_bclrpat = I915_READ(bclrpat_reg);
- save_vtotal = I915_READ(vtotal_reg);
- vblank = I915_READ(vblank_reg);
+ save_bclrpat = intel_uncore_read(uncore, bclrpat_reg);
+ save_vtotal = intel_uncore_read(uncore, vtotal_reg);
+ vblank = intel_uncore_read(uncore, vblank_reg);
vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
vactive = (save_vtotal & 0x7ff) + 1;
@@ -671,21 +675,23 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
vblank_end = ((vblank >> 16) & 0xfff) + 1;
/* Set the border color to purple. */
- I915_WRITE(bclrpat_reg, 0x500050);
+ intel_uncore_write(uncore, bclrpat_reg, 0x500050);
if (!IS_GEN(dev_priv, 2)) {
- u32 pipeconf = I915_READ(pipeconf_reg);
- I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
- POSTING_READ(pipeconf_reg);
+ u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
+ intel_uncore_write(uncore,
+ pipeconf_reg,
+ pipeconf | PIPECONF_FORCE_BORDER);
+ intel_uncore_posting_read(uncore, pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev_priv, pipe);
- st00 = I915_READ8(_VGA_MSR_WRITE);
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
- I915_WRITE(pipeconf_reg, pipeconf);
+ intel_uncore_write(uncore, pipeconf_reg, pipeconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -699,9 +705,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
- I915_WRITE(vblank_reg,
- (vblank_start - 1) |
- ((vblank_end - 1) << 16));
+ intel_uncore_write(uncore,
+ vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -713,9 +720,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/*
* Wait for the border to be displayed
*/
- while (I915_READ(pipe_dsl_reg) >= vactive)
+ while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive)
;
- while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <=
+ vsample)
;
/*
* Watch ST00 for an entire scanline
@@ -725,14 +733,14 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
do {
count++;
/* Read the ST00 VGA status register */
- st00 = I915_READ8(_VGA_MSR_WRITE);
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
- } while ((I915_READ(pipe_dsl_reg) == dsl));
+ } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
- I915_WRITE(vblank_reg, vblank);
+ intel_uncore_write(uncore, vblank_reg, vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -745,7 +753,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- I915_WRITE(bclrpat_reg, save_bclrpat);
+ intel_uncore_write(uncore, bclrpat_reg, save_bclrpat);
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
index 1b3fba359efc..1b3fba359efc 100644
--- a/drivers/gpu/drm/i915/intel_crt.h
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f181c26f62fd..7925a176f900 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -29,16 +29,23 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_vdsc.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -608,7 +615,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -624,7 +631,8 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -646,7 +654,8 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -1214,19 +1223,30 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-#define LC_FREQ 2700
-
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- int refclk = LC_FREQ;
+ int refclk;
int n, p, r;
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & WRPLL_PLL_REF_MASK) {
- case WRPLL_PLL_SSC:
- case WRPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /*
+ * muxed-SSC for BDW.
+ * non-SSC for non-ULT HSW. Check FUSE_STRAP3
+ * for the non-SSC reference frequency.
+ */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ refclk = 24;
+ else
+ refclk = 135;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -1234,11 +1254,11 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case WRPLL_PLL_LCPLL:
- refclk = LC_FREQ;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700;
break;
default:
- WARN(1, "bad wrpll refclk\n");
+ MISSING_CASE(wrpll);
return 0;
}
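/*
 * An editor's summary of the refclk selection above (values in MHz):
 *
 *   WRPLL_REF_SPECIAL_HSW, non-ULT HSW:  24 or 135, per FUSE_STRAP3
 *   WRPLL_REF_SPECIAL_HSW, otherwise:    135 (muxed SSC, falls through)
 *   WRPLL_REF_PCH_SSC:                   135
 *   WRPLL_REF_LCPLL:                     2700
 */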
@@ -1450,7 +1470,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@@ -1605,12 +1626,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
break;
case PORT_CLK_SEL_SPLL:
- pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
- if (pll == SPLL_PLL_FREQ_810MHz)
+ pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
+ if (pll == SPLL_FREQ_810MHz)
link_clock = 81000;
- else if (pll == SPLL_PLL_FREQ_1350MHz)
+ else if (pll == SPLL_FREQ_1350MHz)
link_clock = 135000;
- else if (pll == SPLL_PLL_FREQ_2700MHz)
+ else if (pll == SPLL_FREQ_2700MHz)
link_clock = 270000;
else {
WARN(1, "bad spll freq\n");
@@ -1710,6 +1731,14 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ /*
+ * As per DP 1.4a spec, section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], while sending
+ * YCBCR 420 signals we should program the MSA MISC1 fields, which
+ * indicate the use of a VSC SDP for the Pixel Encoding/Colorimetry
+ * Format.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ temp |= TRANS_MSA_USE_VSC_SDP;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1772,9 +1801,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev_priv) &&
- (crtc_state->pch_pfit.enabled ||
- crtc_state->pch_pfit.force_thru))
+ if (crtc_state->pch_pfit.force_thru)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -3111,6 +3138,15 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, port, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -3375,6 +3411,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
+ intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3626,7 +3663,7 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
intel_ddi_main_link_aux_domain(dig_port));
}
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
+static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@@ -3820,6 +3857,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &pipe_config->infoframes.drm);
}
static enum intel_output_type
@@ -3844,6 +3884,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
@@ -3858,6 +3899,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->pch_pfit.force_thru =
+ pipe_config->pch_pfit.enabled ||
+ pipe_config->crc_enabled;
+
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -3865,7 +3912,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return 0;
-
}
static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
@@ -3925,6 +3971,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
return NULL;
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ intel_dig_port->dp.prepare_link_retrain =
+ intel_ddi_prepare_link_retrain;
+
if (!intel_dp_init_connector(intel_dig_port, connector)) {
kfree(connector);
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 9cf69175942e..a08365da2643 100644
--- a/drivers/gpu/drm/i915/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -31,7 +31,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 75105a2c59ea..8592a7d422de 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -44,31 +44,40 @@
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+#include "display/intel_crt.h"
+#include "display/intel_ddi.h"
+#include "display/intel_dp.h"
+#include "display/intel_dsi.h"
+#include "display/intel_dvo.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_lvds.h"
+#include "display/intel_sdvo.h"
+#include "display/intel_tv.h"
+#include "display/intel_vdsc.h"
+
#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_reset.h"
#include "i915_trace.h"
+#include "intel_acpi.h"
+#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
-#include "intel_crt.h"
-#include "intel_ddi.h"
-#include "intel_dp.h"
#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
+#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
-#include "intel_hdmi.h"
-#include "intel_lvds.h"
+#include "intel_hotplug.h"
+#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
-#include "intel_sdvo.h"
+#include "intel_quirks.h"
+#include "intel_sideband.h"
#include "intel_sprite.h"
-#include "intel_tv.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -120,7 +129,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -153,10 +162,8 @@ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
- mutex_unlock(&dev_priv->sb_lock);
return vco_freq[hpll_freq] * 1000;
}
@@ -167,10 +174,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
u32 val;
int divider;
- mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
- mutex_unlock(&dev_priv->sb_lock);
-
divider = val & CCK_FREQUENCY_VALUES;
WARN((val & CCK_FREQUENCY_STATUS) !=
@@ -183,11 +187,18 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg)
{
+ int hpll;
+
+ vlv_cck_get(dev_priv);
+
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
- return vlv_get_cck_clock(dev_priv, name, reg,
- dev_priv->hpll_freq);
+ hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+
+ vlv_cck_put(dev_priv);
+
+ return hpll;
}
static void intel_update_czclk(struct drm_i915_private *dev_priv)
@@ -476,6 +487,7 @@ static const struct intel_limit intel_limits_bxt = {
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
+/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
@@ -489,6 +501,19 @@ skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
+/* Wa_2006604312:icl */
+static void
+icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ if (enable)
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ else
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+}
+
static bool
needs_modeset(const struct drm_crtc_state *state)
{
@@ -551,7 +576,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
@@ -936,8 +961,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2;
- m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
- clock.n) << 22, refclk * clock.m1);
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
if (m2 > INT_MAX/clock.m1)
continue;
@@ -1080,9 +1105,9 @@ void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
u32 val;
bool cur_state;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
@@ -1392,14 +1417,14 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -1556,14 +1581,14 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
@@ -1891,7 +1916,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
- return cpp;
+ return intel_tile_size(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (IS_GEN(dev_priv, 2))
return 128;
@@ -1934,11 +1959,8 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return 1;
- else
- return intel_tile_size(to_i915(fb->dev)) /
- intel_tile_width_bytes(fb, color_plane);
+ return intel_tile_size(to_i915(fb->dev)) /
+ intel_tile_width_bytes(fb, color_plane);
}
/* Return the tile dimensions in pixel units */
@@ -1973,6 +1995,17 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
return size;
}
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
+{
+ unsigned int size = 0;
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ size += rem_info->plane[i].width * rem_info->plane[i].height;
+
+ return size;
+}
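/*
 * A worked example for intel_remapped_info_size() (editor's numbers):
 * a two-plane framebuffer remapped to 10x5 and 5x3 tiles sums to
 * 10 * 5 + 5 * 3 = 65 pages in the remapped view.
 */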
+
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
@@ -2042,7 +2075,9 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
+ return INTEL_GEN(dev_priv) < 4 ||
+ (plane->has_fbc &&
+ plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}
struct i915_vma *
@@ -2078,7 +2113,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@@ -2133,7 +2169,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(dev_priv, wakeref);
+ i915_gem_object_unlock(obj);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
@@ -2141,9 +2178,12 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
+ i915_gem_object_unlock(vma->obj);
+
i915_vma_put(vma);
}
@@ -2183,16 +2223,8 @@ void intel_add_fb_offsets(int *x, int *y,
int color_plane)
{
- const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
- unsigned int rotation = state->base.rotation;
-
- if (drm_rotation_90_or_270(rotation)) {
- *x += intel_fb->rotated[color_plane].x;
- *y += intel_fb->rotated[color_plane].y;
- } else {
- *x += intel_fb->normal[color_plane].x;
- *y += intel_fb->normal[color_plane].y;
- }
+ *x += state->color_plane[color_plane].x;
+ *y += state->color_plane[color_plane].y;
}
static u32 intel_adjust_tile_offset(int *x, int *y,
@@ -2476,6 +2508,134 @@ bool is_ccs_modifier(u64 modifier)
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+
+ /*
+ * We assume the primary plane for pipe A has
+ * the highest stride limits of them all.
+ */
+ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ plane = to_intel_plane(crtc->base.primary);
+
+ return plane->max_stride(plane, pixel_format, modifier,
+ DRM_MODE_ROTATE_0);
+}
+
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ /*
+ * Arbitrary limit for gen4+ chosen to match the
+ * render engine max stride.
+ *
+ * The new CCS hash mode makes remapping impossible.
+ */
+ if (!is_ccs_modifier(modifier)) {
+ if (INTEL_GEN(dev_priv) >= 7)
+ return 256*1024;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return 128*1024;
+ }
+
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+ fb->format->format,
+ fb->modifier);
+
+ /*
+ * To make remapping with linear generally feasible
+ * we need the stride to be page aligned.
+ */
+ if (fb->pitches[color_plane] > max_stride)
+ return intel_tile_size(dev_priv);
+ else
+ return 64;
+ } else {
+ return intel_tile_width_bytes(fb, color_plane);
+ }
+}
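/*
 * A minimal sketch (example_* is hypothetical) of how the alignment
 * above would be consumed; both 64 and the 4 KiB tile size are powers
 * of two, so a mask test suffices:
 */
static bool example_fb_pitch_aligned(const struct drm_framebuffer *fb,
				     int color_plane)
{
	u32 alignment = intel_fb_stride_alignment(fb, color_plane);

	return (fb->pitches[color_plane] & (alignment - 1)) == 0;
}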
+
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int i;
+
+ /* We don't want to deal with remapping with cursors */
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ /*
+ * The display engine limits already match/exceed the
+ * render engine limits, so not much point in remapping.
+ * Would also need to deal with the fence POT alignment
+ * and gen2 2KiB GTT tile size.
+ */
+ if (INTEL_GEN(dev_priv) < 4)
+ return false;
+
+ /*
+ * The new CCS hash mode isn't compatible with remapping as
+ * the virtual address of the pages affects the compressed data.
+ */
+ if (is_ccs_modifier(fb->modifier))
+ return false;
+
+ /* Linear needs a page aligned stride for remapping */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ unsigned int alignment = intel_tile_size(dev_priv) - 1;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->pitches[i] & alignment)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * No remapping for invisible planes since we don't have
+ * an actual source viewport to remap.
+ */
+ if (!plane_state->base.visible)
+ return false;
+
+ if (!intel_plane_can_remap(plane_state))
+ return false;
+
+ /*
+ * FIXME: aux plane limits on gen9+ are
+ * unclear in Bspec, for now no checking.
+ */
+ stride = intel_fb_pitch(fb, 0, rotation);
+ max_stride = plane->max_stride(plane, fb->format->format,
+ fb->modifier, rotation);
+
+ return stride > max_stride;
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
@@ -2641,6 +2801,168 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return 0;
}
+static void
+intel_plane_remap_gtt(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *info = &plane_state->view.rotated;
+ unsigned int rotation = plane_state->base.rotation;
+ int i, num_planes = fb->format->num_planes;
+ unsigned int tile_size = intel_tile_size(dev_priv);
+ unsigned int src_x, src_y;
+ unsigned int src_w, src_h;
+ u32 gtt_offset = 0;
+
+ memset(&plane_state->view, 0, sizeof(plane_state->view));
+ plane_state->view.type = drm_rotation_90_or_270(rotation) ?
+ I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
+
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ WARN_ON(is_ccs_modifier(fb->modifier));
+
+ /* Make src coordinates relative to the viewport */
+ drm_rect_translate(&plane_state->base.src,
+ -(src_x << 16), -(src_y << 16));
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->base.src,
+ src_w << 16, src_h << 16,
+ DRM_MODE_ROTATE_270);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int hsub = i ? fb->format->hsub : 1;
+ unsigned int vsub = i ? fb->format->vsub : 1;
+ unsigned int cpp = fb->format->cpp[i];
+ unsigned int tile_width, tile_height;
+ unsigned int width, height;
+ unsigned int pitch_tiles;
+ unsigned int x, y;
+ u32 offset;
+
+ intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+ x = src_x / hsub;
+ y = src_y / vsub;
+ width = src_w / hsub;
+ height = src_h / vsub;
+
+ /*
+ * First pixel of the src viewport from the
+ * start of the normal gtt mapping.
+ */
+ x += intel_fb->normal[i].x;
+ y += intel_fb->normal[i].y;
+
+ offset = intel_compute_aligned_offset(dev_priv, &x, &y,
+ fb, i, fb->pitches[i],
+ DRM_MODE_ROTATE_0, tile_size);
+ offset /= tile_size;
+
+ info->plane[i].offset = offset;
+ info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
+ tile_width * cpp);
+ info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+ info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ struct drm_rect r;
+
+ /* rotate the x/y offsets to match the GTT view */
+ r.x1 = x;
+ r.y1 = y;
+ r.x2 = x + width;
+ r.y2 = y + height;
+ drm_rect_rotate(&r,
+ info->plane[i].width * tile_width,
+ info->plane[i].height * tile_height,
+ DRM_MODE_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ pitch_tiles = info->plane[i].height;
+ plane_state->color_plane[i].stride = pitch_tiles * tile_height;
+
+ /* rotate the tile dimensions to match the GTT view */
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = info->plane[i].width;
+ plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
+ }
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset * tile_size, 0);
+
+ gtt_offset += info->plane[i].width * info->plane[i].height;
+
+ plane_state->color_plane[i].offset = 0;
+ plane_state->color_plane[i].x = x;
+ plane_state->color_plane[i].y = y;
+ }
+}
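/*
 * A worked example for the rotated branch above (editor's numbers,
 * units exactly as the code computes them): with tile_height = 32 and
 * a source tall enough that info->plane[i].height = 32 tiles,
 * pitch_tiles = 32 and color_plane[i].stride = 32 * 32 = 1024.
 */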
+
+static int
+intel_plane_compute_gtt(struct intel_plane_state *plane_state)
+{
+ const struct intel_framebuffer *fb =
+ to_intel_framebuffer(plane_state->base.fb);
+ unsigned int rotation = plane_state->base.rotation;
+ int i, num_planes;
+
+ if (!fb)
+ return 0;
+
+ num_planes = fb->base.format->num_planes;
+
+ if (intel_plane_needs_remap(plane_state)) {
+ intel_plane_remap_gtt(plane_state);
+
+ /*
+ * Sometimes even remapping can't overcome
+ * the stride limitations :( This can happen with
+ * big plane sizes and suitably misaligned
+ * offsets.
+ */
+ return intel_plane_check_stride(plane_state);
+ }
+
+ intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
+
+ for (i = 0; i < num_planes; i++) {
+ plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
+ plane_state->color_plane[i].offset = 0;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ plane_state->color_plane[i].x = fb->rotated[i].x;
+ plane_state->color_plane[i].y = fb->rotated[i].y;
+ } else {
+ plane_state->color_plane[i].x = fb->normal[i].x;
+ plane_state->color_plane[i].y = fb->normal[i].y;
+ }
+ }
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->base.src,
+ fb->base.width << 16, fb->base.height << 16,
+ DRM_MODE_ROTATE_270);
+
+ return intel_plane_check_stride(plane_state);
+}
+
static int i9xx_format_to_fourcc(int format)
{
switch (format) {
@@ -2843,6 +3165,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
+ crtc_state->data_rate[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -2968,41 +3291,55 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
- switch (cpp) {
- case 8:
- return 4096;
- case 4:
- case 2:
- case 1:
- return 8192;
- default:
- MISSING_CASE(cpp);
- break;
- }
- break;
+ return 4096;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- switch (cpp) {
- case 8:
+ if (cpp == 8)
return 2048;
- case 4:
+ else
return 4096;
- case 2:
- case 1:
- return 8192;
- default:
- MISSING_CASE(cpp);
- break;
- }
- break;
default:
MISSING_CASE(fb->modifier);
+ return 2048;
}
+}
- return 2048;
+static int glk_max_plane_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 5120;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int icl_max_plane_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 5120;
}
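/*
 * An editor's summary of the three max-width helpers above (pixels):
 *
 *                    linear/X-tiled           Y/Yf-tiled (incl. CCS)
 *   skl (and gen9):  4096                     4096 (2048 if cpp == 8)
 *   glk/gen10:       5120 (4096 if cpp == 8)  5120 (2048 if cpp == 8)
 *   icl (gen11+):    5120                     5120
 */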
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
@@ -3045,16 +3382,24 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
- int max_width = skl_max_plane_width(fb, 0, rotation);
+ int max_width;
int max_height = 4096;
u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_width = icl_max_plane_width(fb, 0, rotation);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ max_width = glk_max_plane_width(fb, 0, rotation);
+ else
+ max_width = skl_max_plane_width(fb, 0, rotation);
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3117,6 +3462,14 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate(&plane_state->base.src,
+ (x << 16) - plane_state->base.src.x1,
+ (y << 16) - plane_state->base.src.y1);
+
return 0;
}
@@ -3173,26 +3526,15 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
- plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
if (!plane_state->base.visible)
return 0;
- /* Rotate src coordinates to match rotated GTT view */
- if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
- fb->width << 16, fb->height << 16,
- DRM_MODE_ROTATE_270);
-
/*
* Handle the AUX surface first since
* the main surface setup depends on it.
@@ -3322,20 +3664,20 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int src_x = plane_state->base.src.x1 >> 16;
- int src_y = plane_state->base.src.y1 >> 16;
+ int src_x, src_y;
u32 offset;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
+ if (!plane_state->base.visible)
+ return 0;
+
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
+
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
@@ -3344,8 +3686,17 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
else
offset = 0;
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate(&plane_state->base.src,
+ (src_x << 16) - plane_state->base.src.x1,
+ (src_y << 16) - plane_state->base.src.y1);
+
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->base.rotation;
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
int src_h = drm_rect_height(&plane_state->base.src) >> 16;
@@ -3382,6 +3733,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -3389,10 +3744,6 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
return 0;
@@ -3531,15 +3882,6 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return 64;
- else
- return intel_tile_width_bytes(fb, color_plane);
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -5019,6 +5361,21 @@ u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
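/*
 * A minimal sketch (example_* is hypothetical) of how the limits above
 * pair up; the real checks live in skl_update_scaler() below:
 */
static bool example_scaler_src_in_range(struct drm_i915_private *dev_priv,
					int src_w, int src_h)
{
	int max_w = INTEL_GEN(dev_priv) >= 11 ? ICL_MAX_SRC_W : SKL_MAX_SRC_W;
	int max_h = INTEL_GEN(dev_priv) >= 11 ? ICL_MAX_SRC_H : SKL_MAX_SRC_H;

	return src_w >= SKL_MIN_SRC_W && src_w <= max_w &&
	       src_h >= SKL_MIN_SRC_H && src_h <= max_h;
}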
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -5298,10 +5655,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
- mutex_unlock(&dev_priv->pcu_lock);
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
@@ -5331,9 +5686,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- mutex_unlock(&dev_priv->pcu_lock);
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -5509,6 +5862,16 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
}
+static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state)
+{
+ /* Wa_2006604312:icl */
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -5542,11 +5905,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
intel_post_enable_primary(&crtc->base, pipe_config);
}
- /* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
- !needs_nv12_wa(dev_priv, pipe_config)) {
+ !needs_nv12_wa(dev_priv, pipe_config))
skl_wa_827(dev_priv, crtc->pipe, false);
- }
+
+ if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+ !needs_scalerclk_wa(dev_priv, pipe_config))
+ icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5583,9 +5948,13 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
- needs_nv12_wa(dev_priv, pipe_config)) {
+ needs_nv12_wa(dev_priv, pipe_config))
skl_wa_827(dev_priv, crtc->pipe, true);
- }
+
+ /* Wa_2006604312:icl */
+ if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+ needs_scalerclk_wa(dev_priv, pipe_config))
+ icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -5991,7 +6360,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
haswell_set_pipeconf(pipe_config);
- haswell_set_pipemisc(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(pipe_config);
intel_crtc->active = true;
@@ -6293,7 +6663,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
if (crtc_state->shared_dpll)
- mask |= BIT_ULL(POWER_DOMAIN_PLLS);
+ mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
return mask;
}
@@ -6520,6 +6890,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(dev_priv->bw_obj.state);
enum intel_display_power_domain domain;
struct intel_plane *plane;
u64 domains;
@@ -6582,6 +6954,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+
+ bw_state->data_rate[intel_crtc->pipe] = 0;
+ bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
/*
@@ -6876,7 +7251,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
- pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
+ pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
pfit_w * pfit_h);
}
@@ -6996,7 +7371,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
- *ret_m = div_u64((u64)m * *ret_n, n);
+ *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
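/*
 * An editor's note on the mul_u32_u32() conversions in this hunk and
 * the clock hunks above: mul_u32_u32() (<linux/math64.h>) is the
 * explicit 32x32->64 widening multiply. It returns the same value as
 * the old (u64)m * *ret_n cast but tends to generate better code on
 * 32-bit targets.
 */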
@@ -7229,7 +7604,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
bestn = pipe_config->dpll.n;
bestm1 = pipe_config->dpll.m1;
@@ -7306,7 +7681,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -7339,7 +7715,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
dpio_val = 0;
loopfilter = 0;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
@@ -7411,7 +7787,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
/**
@@ -7679,9 +8055,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = I915_READ(HBLANK(cpu_transcoder));
+ pipe_config->base.adjusted_mode.crtc_hblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
tmp = I915_READ(HSYNC(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
@@ -7689,9 +8070,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
tmp = I915_READ(VTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = I915_READ(VBLANK(cpu_transcoder));
+ pipe_config->base.adjusted_mode.crtc_vblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
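
All of these timing fields follow the usual display-register convention of storing the value minus one, hence the `+ 1` on every decode; the new DSI check skips the blank registers, which the DSI transcoder presumably does not expose. A toy decode with a made-up register value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical HTOTAL readout: low 16 bits = hdisplay - 1,
		 * high 16 bits = htotal - 1 */
		uint32_t tmp = (2199u << 16) | 1919u;

		unsigned int hdisplay = (tmp & 0xffff) + 1;		/* 1920 */
		unsigned int htotal = ((tmp >> 16) & 0xffff) + 1;	/* 2200 */

		printf("%u active / %u total\n", hdisplay, htotal);
		return 0;
	}
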
@@ -8037,9 +8423,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
clock.m2 = mdiv & DPIO_M2DIV_MASK;
@@ -8148,13 +8534,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = (pll_dw0 & 0xff) << 22;
@@ -8280,6 +8666,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -8654,7 +9041,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
@@ -8740,22 +9127,95 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
+static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+{
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 ctl = I915_READ(SPLL_CTL);
+
+ if ((ctl & SPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ if (IS_BROADWELL(dev_priv) &&
+ (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
+ return true;
+
+ return false;
+}
+
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 ctl = I915_READ(WRPLL_CTL(id));
+
+ if ((ctl & WRPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
+ return true;
+
+ if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ return false;
+}
+
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- bool has_vga = false;
+ bool pch_ssc_in_use = false;
+ bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
- has_vga = true;
+ has_fdi = true;
break;
default:
break;
}
}
- if (has_vga) {
+ /*
+ * The BIOS may have decided to use the PCH SSC
+ * reference so we must not disable it until the
+ * relevant PLLs have stopped relying on it. We'll
+ * just leave the PCH SSC reference enabled in case
+ * any active PLL is using it. It will get disabled
+ * after runtime suspend if we don't have FDI.
+ *
+ * TODO: Move the whole reference clock handling
+ * to the modeset sequence proper so that we can
+ * actually enable/disable/reconfigure these things
+ * safely. To do that we need to introduce a real
+ * clock hierarchy. That would also allow us to do
+ * clock bending finally.
+ */
+ if (spll_uses_pch_ssc(dev_priv)) {
+ DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+ DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+ DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (pch_ssc_in_use)
+ return;
+
+ if (has_fdi) {
lpt_bend_clkout_dp(dev_priv, 0);
lpt_enable_clkout_dp(dev_priv, true, true);
} else {
@@ -8837,44 +9297,68 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
POSTING_READ(PIPECONF(cpu_transcoder));
}
-static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 val = 0;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 val = 0;
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ val |= PIPEMISC_DITHER_6_BPC;
+ break;
+ case 24:
+ val |= PIPEMISC_DITHER_8_BPC;
+ break;
+ case 30:
+ val |= PIPEMISC_DITHER_10_BPC;
+ break;
+ case 36:
+ val |= PIPEMISC_DITHER_12_BPC;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
- switch (crtc_state->pipe_bpp) {
- case 18:
- val |= PIPEMISC_DITHER_6_BPC;
- break;
- case 24:
- val |= PIPEMISC_DITHER_8_BPC;
- break;
- case 30:
- val |= PIPEMISC_DITHER_10_BPC;
- break;
- case 36:
- val |= PIPEMISC_DITHER_12_BPC;
- break;
- default:
- /* Case prevented by pipe_config_set_bpp. */
- BUG();
- }
+ if (crtc_state->dither)
+ val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (crtc_state->dither)
- val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
+ BIT(PLANE_CURSOR))) == 0)
+ val |= PIPEMISC_HDR_MODE_PRECISION;
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+ I915_WRITE(PIPEMISC(crtc->pipe), val);
+}
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPEMISC_YUV420_ENABLE |
- PIPEMISC_YUV420_MODE_FULL_BLEND;
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
- I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+ case PIPEMISC_DITHER_6_BPC:
+ return 18;
+ case PIPEMISC_DITHER_8_BPC:
+ return 24;
+ case PIPEMISC_DITHER_10_BPC:
+ return 30;
+ case PIPEMISC_DITHER_12_BPC:
+ return 36;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
}
}
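
bdw_get_pipemisc_bpp() is the inverse of the dither-BPC switch in bdw_set_pipemisc(): both walk the same four pipe_bpp values, which are simply three times the per-channel bpc that the PIPEMISC field encodes. A quick sketch of the round trip:

	#include <stdio.h>

	/* pipe_bpp counts all three color channels, so bpc = pipe_bpp / 3;
	 * the two switches in the patch map between these columns */
	int main(void)
	{
		static const int pipe_bpp[] = { 18, 24, 30, 36 };
		unsigned int i;

		for (i = 0; i < 4; i++)
			printf("pipe_bpp %d <-> %d bpc\n",
			       pipe_bpp[i], pipe_bpp[i] / 3);
		return 0;
	}
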
@@ -9353,6 +9837,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
@@ -9409,228 +9894,6 @@ out:
return ret;
}
-
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc)
- I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
- pipe_name(crtc->pipe));
-
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
- "Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
- "CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
- "PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
-
- /*
- * In theory we can still leave IRQs enabled, as long as only the HPD
- * interrupts remain enabled. We used to check for that, but since it's
- * gen-specific and since we only disable LCPLL after we fully disable
- * the interrupts, the check below should be enough.
- */
- I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
-}
-
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
- else
- return I915_READ(D_COMP_BDW);
-}
-
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
-{
- if (IS_HASWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
- if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
- val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
- mutex_unlock(&dev_priv->pcu_lock);
- } else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
- }
-}
-
-/*
- * This function implements pieces of two sequences from BSpec:
- * - Sequence for display software to disable LCPLL
- * - Sequence for display software to allow package C8+
- * The steps implemented here are just the steps that actually touch the LCPLL
- * register. Callers should take care of disabling all the display engine
- * functions, doing the mode unset, fixing interrupts, etc.
- */
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down)
-{
- u32 val;
-
- assert_can_disable_lcpll(dev_priv);
-
- val = I915_READ(LCPLL_CTL);
-
- if (switch_to_fclk) {
- val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
-
- val = I915_READ(LCPLL_CTL);
- }
-
- val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
- DRM_ERROR("LCPLL still locked\n");
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
- ndelay(100);
-
- if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
- 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
-
- if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-}
-
-/*
- * Fully restores LCPLL, disallowing power down and switching back to LCPLL
- * source.
- */
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(LCPLL_CTL);
-
- if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
- LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
- return;
-
- /*
- * Make sure we're not on PC8 state before disabling PC8, otherwise
- * we'll hang the machine. To prevent PC8 state, just enable force_wake.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (val & LCPLL_POWER_DOWN_ALLOW) {
- val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_FORCE;
- val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
-
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
- DRM_ERROR("LCPLL not locked yet\n");
-
- if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-}
-
-/*
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states. Our driver only allows PC8+ when going into runtime PM.
- *
- * The requirements for PC8+ are that all the outputs are disabled, the power
- * well is disabled and most interrupts are disabled, and these are also
- * requirements for runtime PM. When these conditions are met, we manually do
- * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
- * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
- * hang the machine.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6. Notice that this happens even
- * if we don't put the device in PCI D3 state (which is what currently happens
- * because of the runtime PM support).
- *
- * For more, read "Display Sequences for Package C8" on the hardware
- * documentation.
- */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Enabling package C8+\n");
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-
- lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
-}
-
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Disabling package C8+\n");
-
- hsw_restore_lcpll(dev_priv);
- lpt_init_pch_refclk(dev_priv);
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -9801,6 +10064,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
for_each_set_bit(panel_transcoder,
&panel_transcoder_mask,
ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+ bool force_thru = false;
enum pipe trans_pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
@@ -9822,6 +10086,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ force_thru = true;
+ /* fall through */
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -9833,8 +10099,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
break;
}
- if (trans_pipe == crtc->pipe)
+ if (trans_pipe == crtc->pipe) {
pipe_config->cpu_transcoder = panel_transcoder;
+ pipe_config->pch_pfit.force_thru = force_thru;
+ }
}
/*
@@ -10018,6 +10286,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pipe_color_config(pipe_config);
}
+ intel_color_get_config(pipe_config);
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
WARN_ON(power_domain_mask & BIT_ULL(power_domain));
@@ -10119,19 +10389,17 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
+ if (!plane_state->base.visible)
+ return 0;
+
src_x = plane_state->base.src_x >> 16;
src_y = plane_state->base.src_y >> 16;
@@ -10168,6 +10436,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -10175,10 +10447,6 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
return 0;
}
@@ -11105,6 +11373,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (!is_crtc_enabled) {
plane_state->visible = visible = false;
to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
+ to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
}
if (!was_visible && !visible)
@@ -11320,6 +11589,17 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return 0;
}
+static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
+}
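
c8_planes is presumably a bitmask of planes scanning out the C8 format; the double negation collapses each mask to a boolean, so the function fires exactly when C8 planes appear or disappear, not when the set of them merely changes. Illustrated:

	#include <stdbool.h>
	#include <stdio.h>

	static bool c8_planes_changed(unsigned int old_mask,
				      unsigned int new_mask)
	{
		/* !x != !y is a logical XOR on "any bit set" */
		return !old_mask != !new_mask;
	}

	int main(void)
	{
		printf("%d\n", c8_planes_changed(0x0, 0x3)); /* 1: appeared */
		printf("%d\n", c8_planes_changed(0x1, 0x2)); /* 0: still some */
		return 0;
	}
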
+
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -11343,6 +11623,13 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
+ /*
+ * May need to update pipe gamma enable bits
+ * when C8 planes are getting enabled/disabled.
+ */
+ if (c8_planes_changed(pipe_config))
+ crtc_state->color_mgmt_changed = true;
+
if (mode_changed || pipe_config->update_pipe ||
crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
@@ -11500,17 +11787,19 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
}
static inline void
-intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
- unsigned int lane_count, struct intel_link_m_n *m_n)
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+ const char *id, unsigned int lane_count,
+ const struct intel_link_m_n *m_n)
{
DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
@@ -11588,26 +11877,54 @@ static const char *output_formats(enum intel_output_format format)
return output_format_str[format];
}
-static void intel_dump_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_format_name_buf format_name;
+
+ if (!fb) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ yesno(plane_state->base.visible));
+ return;
+ }
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->format->format, &format_name),
+ yesno(plane_state->base.visible));
+ DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
+ plane_state->base.rotation, plane_state->scaler_id);
+ if (plane_state->base.visible)
+ DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->base.src),
+ DRM_RECT_ARG(&plane_state->base.dst));
+}
+
+static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
+ struct intel_atomic_state *state,
const char *context)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *state;
- struct drm_framebuffer *fb;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
char buf[64];
+ int i;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
- crtc->base.base.id, crtc->base.name, context);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
+ crtc->base.base.id, crtc->base.name,
+ yesno(pipe_config->base.enable), context);
- snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
- buf, pipe_config->output_types);
+ if (!pipe_config->base.enable)
+ goto dump_planes;
- DRM_DEBUG_KMS("output format: %s\n",
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
+ yesno(pipe_config->base.active),
+ buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
@@ -11628,10 +11945,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
- DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio, pipe_config->has_infoframe);
-
- DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+ DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
@@ -11669,51 +11984,30 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
else
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
pipe_config->pch_pfit.pos,
pipe_config->pch_pfit.size,
- enableddisabled(pipe_config->pch_pfit.enabled));
+ enableddisabled(pipe_config->pch_pfit.enabled),
+ yesno(pipe_config->pch_pfit.force_thru));
DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide);
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
- DRM_DEBUG_KMS("planes on this crtc\n");
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct drm_format_name_buf format_name;
- intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != crtc->pipe)
- continue;
-
- state = to_intel_plane_state(plane->state);
- fb = state->base.fb;
- if (!fb) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
- plane->base.id, plane->name, state->scaler_id);
- continue;
- }
+dump_planes:
+ if (!state)
+ return;
- DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
- plane->base.id, plane->name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name));
- if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ intel_dump_plane_state(plane_state);
}
}
-static bool check_digital_port_conflicts(struct drm_atomic_state *state)
+static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
@@ -11730,7 +12024,9 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- connector_state = drm_atomic_get_new_connector_state(state, connector);
+ connector_state =
+ drm_atomic_get_new_connector_state(&state->base,
+ connector);
if (!connector_state)
connector_state = connector->state;
@@ -11794,7 +12090,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
- saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11810,9 +12105,9 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
}
static int
-intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
@@ -11997,18 +12292,14 @@ intel_compare_m_n(unsigned int m, unsigned int n,
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2,
- bool adjust)
-{
- if (m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
- m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
- intel_compare_m_n(m_n->link_m, m_n->link_n,
- m2_n2->link_m, m2_n2->link_n, !adjust)) {
- return true;
- }
-
- return false;
+ const struct intel_link_m_n *m2_n2,
+ bool exact)
+{
+ return m_n->tu == m2_n2->tu &&
+ intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+ m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
+ intel_compare_m_n(m_n->link_m, m_n->link_n,
+ m2_n2->link_m, m2_n2->link_n, exact);
}
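
With the `adjust` parameter renamed to `exact` and callers now passing !fastset, the intent reads directly off the call site: fastset comparisons may be fuzzy, state-checker comparisons must be exact. A sketch of exact-vs-ratio comparison (the kernel's fuzzy variant additionally tolerates small rounding error; the plain cross-multiply here is a simplification):

	#include <stdbool.h>
	#include <stdio.h>

	/* cross-multiply so comparing m1/n1 with m2/n2 needs no division */
	static bool ratios_match(unsigned int m1, unsigned int n1,
				 unsigned int m2, unsigned int n2, bool exact)
	{
		if (exact)
			return m1 == m2 && n1 == n2;

		return (unsigned long long)m1 * n2 ==
		       (unsigned long long)m2 * n1;
	}

	int main(void)
	{
		printf("%d\n", ratios_match(1, 2, 2, 4, false));	/* 1 */
		printf("%d\n", ratios_match(1, 2, 2, 4, true));		/* 0 */
		return 0;
	}
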
static bool
@@ -12019,16 +12310,16 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
}
static void
-pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
- bool adjust, const char *name,
- const union hdmi_infoframe *a,
- const union hdmi_infoframe *b)
+pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
{
- if (adjust) {
+ if (fastset) {
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+ drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
drm_dbg(DRM_UT_KMS, "expected:");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
drm_dbg(DRM_UT_KMS, "found");
@@ -12043,7 +12334,7 @@ pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
}
static void __printf(3, 4)
-pipe_config_err(bool adjust, const char *name, const char *format, ...)
+pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12052,8 +12343,8 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- if (adjust)
- drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
+ if (fastset)
+ drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
else
drm_err("mismatch in %s %pV", name, &vaf);
@@ -12078,14 +12369,13 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
}
static bool
-intel_pipe_config_compare(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *current_config,
- struct intel_crtc_state *pipe_config,
- bool adjust)
+intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset)
{
- struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
bool ret = true;
- bool fixup_inherited = adjust &&
+ bool fixup_inherited = fastset &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
@@ -12096,30 +12386,30 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
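
Each PIPE_CONF_CHECK_* body is wrapped in do { ... } while (0) so the macro expands to exactly one statement and stays safe under an unbraced if/else; a bare multi-statement expansion would leave a stray semicolon that detaches a following else. A minimal demonstration of the wrapped form:

	#include <stdbool.h>
	#include <stdio.h>

	/* a bare "if (!(cond)) ret = false" expansion followed by ';'
	 * would make "if (...) MACRO(x); else ..." a syntax error */
	#define CHECK_WRAPPED(cond)	do { if (!(cond)) ret = false; } while (0)

	int main(void)
	{
		bool ret = true;

		if (1)
			CHECK_WRAPPED(1 == 2);	/* expands to one statement */
		else
			ret = true;

		printf("%d\n", ret);	/* 0 */
		return 0;
	}
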
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %i, found %i)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %s, found %s)\n", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
ret = false; \
} \
} while (0)
@@ -12133,20 +12423,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_err(adjust, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %p, found %p)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %p, found %p)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
@@ -12154,20 +12444,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
- adjust)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
- current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
- current_config->name.link_m, \
- current_config->name.link_n, \
- pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
- pipe_config->name.link_m, \
- pipe_config->name.link_n); \
+ !fastset)) { \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
ret = false; \
} \
} while (0)
@@ -12179,49 +12469,49 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
*/
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name, adjust) && \
+ &pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name, adjust)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
- current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
- current_config->name.link_m, \
- current_config->name.link_n, \
- current_config->alt_name.tu, \
- current_config->alt_name.gmch_m, \
- current_config->alt_name.gmch_n, \
- current_config->alt_name.link_m, \
- current_config->alt_name.link_n, \
- pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
- pipe_config->name.link_m, \
- pipe_config->name.link_n); \
+ &pipe_config->name, !fastset)) { \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "or tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ current_config->alt_name.tu, \
+ current_config->alt_name.gmch_m, \
+ current_config->alt_name.gmch_n, \
+ current_config->alt_name.link_m, \
+ current_config->alt_name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
- (mask), \
- current_config->name & (mask), \
- pipe_config->name & (mask)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(%x) (expected %i, found %i)\n", \
+ (mask), \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %i, found %i)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
@@ -12229,9 +12519,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
- &current_config->infoframes.name, \
- &pipe_config->infoframes.name); \
+ pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
ret = false; \
} \
} while (0)
@@ -12281,7 +12571,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
- PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(has_infoframe);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -12309,11 +12599,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
* Changing the EDP transcoder input mux
* (A_ONOFF vs. A_ON) requires a full modeset.
*/
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
- current_config->cpu_transcoder == TRANSCODER_EDP)
- PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+ PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
- if (!adjust) {
+ if (!fastset) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
@@ -12386,6 +12674,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
+ PIPE_CONF_CHECK_INFOFRAME(drm);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -12692,13 +12981,10 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(new_crtc_state);
- if (!intel_pipe_config_compare(dev_priv, sw_config,
- pipe_config, false)) {
+ if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(intel_crtc, pipe_config,
- "[hw state]");
- intel_dump_pipe_config(intel_crtc, sw_config,
- "[sw state]");
+ intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
+ intel_dump_pipe_config(sw_config, NULL, "[sw state]");
}
}
@@ -12879,31 +13165,30 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
crtc->scanline_offset = 1;
}
-static void intel_modeset_clear_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct intel_shared_dpll *old_dpll =
- to_intel_crtc_state(old_crtc_state)->shared_dpll;
+ old_crtc_state->shared_dpll;
- if (!needs_modeset(new_crtc_state))
+ if (!needs_modeset(&new_crtc_state->base))
continue;
- to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
+ new_crtc_state->shared_dpll = NULL;
if (!old_dpll)
continue;
- intel_release_shared_dpll(old_dpll, intel_crtc, state);
+ intel_release_shared_dpll(old_dpll, crtc, &state->base);
}
}
@@ -12913,29 +13198,27 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
-static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
+static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
- struct drm_crtc_state *crtc_state;
- struct intel_crtc *intel_crtc;
- struct drm_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
struct intel_crtc_state *first_crtc_state = NULL;
struct intel_crtc_state *other_crtc_state = NULL;
enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
int i;
/* look at all crtc's that are going to be enabled in during modeset */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- intel_crtc = to_intel_crtc(crtc);
-
- if (!crtc_state->active || !needs_modeset(crtc_state))
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->base.active ||
+ !needs_modeset(&crtc_state->base))
continue;
if (first_crtc_state) {
- other_crtc_state = to_intel_crtc_state(crtc_state);
+ other_crtc_state = crtc_state;
break;
} else {
- first_crtc_state = to_intel_crtc_state(crtc_state);
- first_pipe = intel_crtc->pipe;
+ first_crtc_state = crtc_state;
+ first_pipe = crtc->pipe;
}
}
@@ -12944,24 +13227,22 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
return 0;
/* w/a possibly needed, check how many crtc's are already enabled. */
- for_each_intel_crtc(state->dev, intel_crtc) {
- struct intel_crtc_state *pipe_config;
-
- pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ for_each_intel_crtc(state->base.dev, crtc) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- pipe_config->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- if (!pipe_config->base.active ||
- needs_modeset(&pipe_config->base))
+ if (!crtc_state->base.active ||
+ needs_modeset(&crtc_state->base))
continue;
/* 2 or more enabled crtcs means no need for w/a */
if (enabled_pipe != INVALID_PIPE)
return 0;
- enabled_pipe = intel_crtc->pipe;
+ enabled_pipe = crtc->pipe;
}
if (enabled_pipe != INVALID_PIPE)
@@ -13021,12 +13302,11 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
return 0;
}
-static int intel_modeset_checks(struct drm_atomic_state *state)
+static int intel_modeset_checks(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int ret = 0, i;
if (!check_digital_port_conflicts(state)) {
@@ -13035,24 +13315,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
}
/* keep the current setting */
- if (!intel_state->cdclk.force_min_cdclk_changed)
- intel_state->cdclk.force_min_cdclk =
- dev_priv->cdclk.force_min_cdclk;
-
- intel_state->modeset = true;
- intel_state->active_crtcs = dev_priv->active_crtcs;
- intel_state->cdclk.logical = dev_priv->cdclk.logical;
- intel_state->cdclk.actual = dev_priv->cdclk.actual;
- intel_state->cdclk.pipe = INVALID_PIPE;
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (new_crtc_state->active)
- intel_state->active_crtcs |= 1 << i;
+ if (!state->cdclk.force_min_cdclk_changed)
+ state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+
+ state->modeset = true;
+ state->active_crtcs = dev_priv->active_crtcs;
+ state->cdclk.logical = dev_priv->cdclk.logical;
+ state->cdclk.actual = dev_priv->cdclk.actual;
+ state->cdclk.pipe = INVALID_PIPE;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->base.active)
+ state->active_crtcs |= 1 << i;
else
- intel_state->active_crtcs &= ~(1 << i);
+ state->active_crtcs &= ~(1 << i);
- if (old_crtc_state->active != new_crtc_state->active)
- intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
+ if (old_crtc_state->base.active != new_crtc_state->base.active)
+ state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
}
/*
@@ -13075,19 +13355,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* touching the hardware
*/
if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &intel_state->cdclk.logical)) {
- ret = intel_lock_all_pipes(state);
+ &state->cdclk.logical)) {
+ ret = intel_lock_all_pipes(&state->base);
if (ret < 0)
return ret;
}
- if (is_power_of_2(intel_state->active_crtcs)) {
+ if (is_power_of_2(state->active_crtcs)) {
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- pipe = ilog2(intel_state->active_crtcs);
+ pipe = ilog2(state->active_crtcs);
crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
- crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
if (crtc_state && needs_modeset(crtc_state))
pipe = INVALID_PIPE;
} else {
@@ -13098,27 +13378,27 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
if (pipe != INVALID_PIPE &&
intel_cdclk_needs_cd2x_update(dev_priv,
&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
- ret = intel_lock_all_pipes(state);
+ &state->cdclk.actual)) {
+ ret = intel_lock_all_pipes(&state->base);
if (ret < 0)
return ret;
- intel_state->cdclk.pipe = pipe;
+ state->cdclk.pipe = pipe;
} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(state);
+ &state->cdclk.actual)) {
+ ret = intel_modeset_all_pipes(&state->base);
if (ret < 0)
return ret;
- intel_state->cdclk.pipe = INVALID_PIPE;
+ state->cdclk.pipe = INVALID_PIPE;
}
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- intel_state->cdclk.logical.cdclk,
- intel_state->cdclk.actual.cdclk);
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- intel_state->cdclk.logical.voltage_level,
- intel_state->cdclk.actual.voltage_level);
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
}
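
The active_crtcs field is a bitmask of enabled pipes, so is_power_of_2() tests for exactly one active pipe and ilog2() recovers its index; only then can the cdclk code attempt a pipe-local cd2x update instead of locking everything. A sketch of the bit trick (__builtin_ctzl stands in for ilog2, which it equals on a power of two):

	#include <stdio.h>

	static int is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		unsigned long active_crtcs = 1ul << 2;	/* only pipe C */

		if (is_power_of_2(active_crtcs))	/* exactly one bit set */
			printf("single active pipe: %d\n",
			       __builtin_ctzl(active_crtcs));
		return 0;
	}
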
intel_modeset_clear_plls(state);
@@ -13146,14 +13426,10 @@ static int calc_watermark_data(struct intel_atomic_state *state)
return 0;
}
-static void intel_crtc_check_fastset(struct intel_crtc_state *old_crtc_state,
+static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(new_crtc_state->base.crtc->dev);
-
- if (!intel_pipe_config_compare(dev_priv, old_crtc_state,
- new_crtc_state, true))
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
return;
new_crtc_state->base.mode_changed = false;
@@ -13176,85 +13452,105 @@ static void intel_crtc_check_fastset(struct intel_crtc_state *old_crtc_state,
/**
* intel_atomic_check - validate state object
* @dev: drm device
- * @state: state to validate
+ * @_state: state to validate
*/
static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *crtc_state;
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int ret, i;
- bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
+ bool any_ms = state->cdclk.force_min_cdclk_changed;
/* Catch I915_MODE_FLAG_INHERITED */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- crtc_state, i) {
- if (crtc_state->mode.private_flags !=
- old_crtc_state->mode.private_flags)
- crtc_state->mode_changed = true;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->base.mode.private_flags !=
+ old_crtc_state->base.mode.private_flags)
+ new_crtc_state->base.mode_changed = true;
}
- ret = drm_atomic_helper_check_modeset(dev, state);
+ ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
- return ret;
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
+ goto fail;
- if (!needs_modeset(crtc_state))
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(&new_crtc_state->base))
continue;
- if (!crtc_state->enable) {
+ if (!new_crtc_state->base.enable) {
any_ms = true;
continue;
}
- ret = intel_modeset_pipe_config(crtc, pipe_config);
- if (ret == -EDEADLK)
- return ret;
- if (ret) {
- intel_dump_pipe_config(to_intel_crtc(crtc),
- pipe_config, "[failed]");
- return ret;
- }
+ ret = intel_modeset_pipe_config(new_crtc_state);
+ if (ret)
+ goto fail;
- intel_crtc_check_fastset(to_intel_crtc_state(old_crtc_state),
- pipe_config);
+ intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
- if (needs_modeset(crtc_state))
+ if (needs_modeset(&new_crtc_state->base))
any_ms = true;
-
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- needs_modeset(crtc_state) ?
- "[modeset]" : "[fastset]");
}
- ret = drm_dp_mst_atomic_check(state);
+ ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
- return ret;
+ goto fail;
if (any_ms) {
ret = intel_modeset_checks(state);
-
if (ret)
- return ret;
+ goto fail;
} else {
- intel_state->cdclk.logical = dev_priv->cdclk.logical;
+ state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(intel_state);
+ ret = icl_add_linked_planes(state);
if (ret)
- return ret;
+ goto fail;
+
+ ret = drm_atomic_helper_check_planes(dev, &state->base);
+ if (ret)
+ goto fail;
+
+ intel_fbc_choose_crtc(dev_priv, state);
+ ret = calc_watermark_data(state);
+ if (ret)
+ goto fail;
- ret = drm_atomic_helper_check_planes(dev, state);
+ ret = intel_bw_atomic_check(state);
if (ret)
+ goto fail;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(&new_crtc_state->base) &&
+ !new_crtc_state->update_pipe)
+ continue;
+
+ intel_dump_pipe_config(new_crtc_state, state,
+ needs_modeset(&new_crtc_state->base) ?
+ "[modeset]" : "[fastset]");
+ }
+
+ return 0;
+
+ fail:
+ if (ret == -EDEADLK)
return ret;
- intel_fbc_choose_crtc(dev_priv, intel_state);
- return calc_watermark_data(intel_state);
+ /*
+ * FIXME would probably be nice to know which crtc specifically
+ * caused the failure, in cases where we can pinpoint it.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_dump_pipe_config(new_crtc_state, state, "[failed]");
+
+ return ret;
}
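
The rework funnels every failure in intel_atomic_check() through one `fail:` label, which dumps all crtc states except on -EDEADLK — that value signals the normal atomic backoff/retry cycle, where dumps would be pure noise. The shape of the pattern, with stub step functions and hypothetical names:

	#include <stdio.h>

	static int step_one(void) { return 0; }
	static int step_two(void) { return -1; }	/* simulated failure */

	static int do_check(void)
	{
		int ret;

		ret = step_one();
		if (ret)
			goto fail;

		ret = step_two();
		if (ret)
			goto fail;

		return 0;

	fail:
		/* one place for diagnostics, as the reworked check
		 * now dumps every crtc state here */
		printf("check failed: %d\n", ret);
		return ret;
	}

	int main(void)
	{
		return do_check() ? 1 : 0;
	}
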
static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -13643,6 +13939,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -13721,6 +14018,8 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
+ intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
drm_atomic_state_get(state);
i915_sw_fence_init(&intel_state->commit_ready,
intel_atomic_commit_ready);
@@ -13757,6 +14056,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&intel_state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
@@ -13768,6 +14068,7 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&intel_state->commit_ready);
drm_atomic_helper_cleanup_planes(dev, state);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
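
The new wakeref handling must stay balanced: intel_atomic_commit() acquires it up front, and every early-return path plus the end of intel_atomic_commit_tail() releases it. A toy model of that invariant:

	#include <stdio.h>

	static int wakeref;

	static void runtime_pm_get(void) { wakeref++; }
	static void runtime_pm_put(void) { wakeref--; }

	static int commit(int fail_prepare)
	{
		runtime_pm_get();

		if (fail_prepare) {
			runtime_pm_put();	/* error paths release it */
			return -1;
		}

		/* ...the commit tail runs and releases it at the end... */
		runtime_pm_put();
		return 0;
	}

	int main(void)
	{
		commit(1);
		commit(0);
		printf("balanced: %d\n", wakeref == 0);	/* 1 */
		return 0;
	}
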
@@ -13966,7 +14267,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
if (needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- old_obj->resv, NULL,
+ old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
if (ret < 0)
@@ -14010,13 +14311,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- obj->resv, NULL,
+ obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
@@ -14129,6 +14430,9 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(new_crtc_state);
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
out:
if (dev_priv->display.atomic_update_watermarks)
dev_priv->display.atomic_update_watermarks(state,
@@ -14239,8 +14543,6 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
@@ -14250,8 +14552,6 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
@@ -14392,8 +14692,6 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_legacy_cursor_update,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_cursor_format_mod_supported,
@@ -14629,9 +14927,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = -ENOMEM;
goto fail;
}
+ __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
intel_crtc->config = crtc_state;
- intel_crtc->base.state = &crtc_state->base;
- crtc_state->base.crtc = &intel_crtc->base;
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
@@ -15084,31 +15381,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.dirty = intel_user_framebuffer_dirty,
};
-static
-u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 fb_modifier)
-{
- struct intel_crtc *crtc;
- struct intel_plane *plane;
-
- /*
- * We assume the primary plane for pipe A has
- * the highest stride limits of them all.
- */
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
- plane = to_intel_plane(crtc->base.primary);
-
- return plane->max_stride(plane, pixel_format, fb_modifier,
- DRM_MODE_ROTATE_0);
-}
-
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- u32 pitch_limit;
+ u32 max_stride;
unsigned int tiling, stride;
int ret = -EINVAL;
int i;
@@ -15160,13 +15439,13 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- if (mode_cmd->pitches[0] > pitch_limit) {
+ max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (mode_cmd->pitches[0] > max_stride) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
- mode_cmd->pitches[0], pitch_limit);
+ mode_cmd->pitches[0], max_stride);
goto err;
}
@@ -15443,6 +15722,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = intel_update_crtcs;
}
+static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
+{
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
@@ -15642,6 +15931,10 @@ int intel_modeset_init(struct drm_device *dev)
drm_mode_config_init(dev);
+ ret = intel_bw_init(dev_priv);
+ if (ret)
+ return ret;
+
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -15680,16 +15973,22 @@ int intel_modeset_init(struct drm_device *dev)
}
}
- /* maximum framebuffer dimensions */
- if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ /*
+ * Maximum framebuffer dimensions, chosen to match
+ * the maximum render engine surface size on gen4+.
+ */
+ if (INTEL_GEN(dev_priv) >= 7) {
+ dev->mode_config.max_width = 16384;
+ dev->mode_config.max_height = 16384;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
} else if (IS_GEN(dev_priv, 3)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
}
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
@@ -16173,7 +16472,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
- crtc_state->base.crtc = &crtc->base;
+ __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
crtc_state->base.active = crtc_state->base.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
@@ -16264,8 +16563,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(dev, crtc) {
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(dev_priv->bw_obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
int min_cdclk = 0;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16304,6 +16606,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use intel_plane_data_rate() :(
+ */
+ if (plane_state->base.visible)
+ crtc_state->data_rate[plane->id] =
+ 4 * crtc_state->pixel_rate;
+ }
+
+ intel_bw_crtc_update(bw_state, crtc_state);
+
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
@@ -16453,8 +16770,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc, crtc_state,
- "[setup_hw_state]");
+ intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
}
intel_modeset_update_connector_atomic_state(dev);
@@ -16588,7 +16904,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_overlay_cleanup(dev_priv);
- intel_teardown_gmbus(dev_priv);
+ intel_gmbus_teardown(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 2220588e86ac..ee6b8194a459 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,6 +28,9 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
+struct drm_i915_private;
+struct intel_plane_state;
+
enum i915_gpio {
GPIOA,
GPIOB,
@@ -217,64 +220,6 @@ enum aux_ch {
#define aux_ch_name(a) ((a) + 'A')
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_EDP_VDSC,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_AUX_E,
- POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_GT_IRQ,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
/* Used by dp and fdi links */
struct intel_link_m_n {
u32 tu;
@@ -361,30 +306,6 @@ struct intel_link_m_n {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
for_each_if((intel_connector)->base.encoder == (__encoder))
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if(BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -432,4 +353,9 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
struct intel_link_m_n *m_n,
bool constant_n);
bool is_ccs_modifier(u64 modifier);
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier);
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
new file mode 100644
index 000000000000..c93ad512014c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -0,0 +1,4618 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vgaarb.h>
+
+#include "display/intel_crt.h"
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_cdclk.h"
+#include "intel_combo_phy.h"
+#include "intel_csr.h"
+#include "intel_dpio_phy.h"
+#include "intel_drv.h"
+#include "intel_hotplug.h"
+#include "intel_sideband.h"
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_DISPLAY_CORE:
+ return "DISPLAY_CORE";
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
+ case POWER_DOMAIN_TRANSCODER_DSI_A:
+ return "TRANSCODER_DSI_A";
+ case POWER_DOMAIN_TRANSCODER_DSI_C:
+ return "TRANSCODER_DSI_C";
+ case POWER_DOMAIN_PORT_DDI_A_LANES:
+ return "PORT_DDI_A_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_LANES:
+ return "PORT_DDI_B_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_LANES:
+ return "PORT_DDI_C_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_LANES:
+ return "PORT_DDI_D_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_LANES:
+ return "PORT_DDI_E_LANES";
+ case POWER_DOMAIN_PORT_DDI_F_LANES:
+ return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_IO:
+ return "PORT_DDI_A_IO";
+ case POWER_DOMAIN_PORT_DDI_B_IO:
+ return "PORT_DDI_B_IO";
+ case POWER_DOMAIN_PORT_DDI_C_IO:
+ return "PORT_DDI_C_IO";
+ case POWER_DOMAIN_PORT_DDI_D_IO:
+ return "PORT_DDI_D_IO";
+ case POWER_DOMAIN_PORT_DDI_E_IO:
+ return "PORT_DDI_E_IO";
+ case POWER_DOMAIN_PORT_DDI_F_IO:
+ return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
+ case POWER_DOMAIN_AUX_F:
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_IO_A:
+ return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
+ case POWER_DOMAIN_GMBUS:
+ return "GMBUS";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ case POWER_DOMAIN_MODESET:
+ return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
+ default:
+ MISSING_CASE(domain);
+ return "?";
+ }
+}
+
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ power_well->desc->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+ power_well->hw_enabled = false;
+ power_well->desc->ops->disable(dev_priv, power_well);
+}
+
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN(!power_well->count, "Use count on power well %s is already zero",
+ power_well->desc->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(dev_priv, power_well);
+}
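+
+/*
+ * Illustrative pairing (a sketch, not part of this patch): each get must be
+ * balanced by a put, and the hardware is only touched on the 0 <-> 1 count
+ * transitions:
+ *
+ *   intel_power_well_get(dev_priv, power_well);   // 0 -> 1: enables HW
+ *   intel_power_well_get(dev_priv, power_well);   // 1 -> 2: no HW access
+ *   intel_power_well_put(dev_priv, power_well);   // 2 -> 1: no HW access
+ *   intel_power_well_put(dev_priv, power_well);   // 1 -> 0: disables HW
+ */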
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_well *power_well;
+ bool is_enabled;
+
+ if (dev_priv->runtime_pm.suspended)
+ return false;
+
+ is_enabled = true;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+ if (power_well->desc->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
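+
+/*
+ * Illustrative readout-time use (a sketch, not part of this patch): abort the
+ * hw state readout of a pipe when the power domain backing it is off.
+ *
+ *   if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
+ *           return false;
+ */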
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (has_vga) {
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ }
+
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
+{
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore,
+ regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ 1));
+}
+
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 ret;
+
+ ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
+ ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
+ ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+
+ return ret;
+}
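+
+/*
+ * The value returned above is a bitmask of the active requesters: bit 0 is
+ * BIOS, bit 1 the driver, bit 2 KVMR (when present) and bit 3 debug. It feeds
+ * the "bios:%d driver:%d kvmr:%d debug:%d" diagnostic below.
+ */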
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ bool disabled;
+ u32 reqs;
+
+ /*
+ * Bspec doesn't require waiting for PWs to get disabled, but we still do
+ * this for paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(I915_READ(regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ if (disabled)
+ return;
+
+ DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->desc->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
+{
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg),
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ bool wait_fuses = power_well->desc->hsw.has_fuses;
+ enum skl_power_gate uninitialized_var(pg);
+ u32 val;
+
+ if (wait_fuses) {
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ }
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: cnl */
+ if (IS_CANNONLAKE(dev_priv) &&
+ pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
+ pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+ val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+ val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+ }
+
+ if (wait_fuses)
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask,
+ power_well->desc->hsw.has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 val;
+
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask);
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: icl */
+ if (IS_ICELAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, port)) {
+ val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ u32 val;
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ u32 val;
+
+ val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (power_well->desc->hsw.is_tc_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+ hsw_power_well_enable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ enum i915_power_well_id id = power_well->desc->id;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
+
+ /*
+ * On GEN9 big core due to a DMC bug the driver's request bits for PW1
+ * and the MISC_IO PW will be not restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= I915_READ(regs->bios);
+
+ return (val & mask) == mask;
+}
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+ * 1] Check relevant display engine registers to verify if mode
+ * set disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+ /* It has been observed that disabling the dc6 state sometimes
+ * doesn't stick and dmc keeps returning old value. Make sure
+ * the write really sticks enough times and also force rewrite until
+ * we are confident that state is exactly what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time we need just one retry; avoid spamming the log */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+
+ mask = DC_STATE_EN_UPTO_DC5;
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEN9_LP(dev_priv))
+ mask |= DC_STATE_EN_DC9;
+ else
+ mask |= DC_STATE_EN_UPTO_DC6;
+
+ return mask;
+}
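+
+/*
+ * Restating the mask logic above: gen11+ allows DC5 | DC6 | DC9, gen9 LP
+ * allows DC5 | DC9, and all other gen9+ platforms allow DC5 | DC6.
+ */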
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
+ dev_priv->csr.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back when exiting
+ * that state to a shallower power state (lower in number). The HW will decide
+ * when to actually enter a given state on an on-demand basis, for instance
+ * depending on the active state of display pipes. The state of display
+ * registers backed by affected power rails is saved/restored as needed.
+ *
+ * Based on the above, enabling a deeper DC power state is asynchronous wrt.
+ * the request to enable it. Disabling a deeper power state is synchronous: for
+ * instance setting %DC_STATE_DISABLE won't complete until all HW resources are
+ * turned back on and register state is restored. This is guaranteed by the
+ * MMIO write to DC_STATE_EN blocking until the state is restored.
+ */
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+ u32 val;
+ u32 mask;
+
+ if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ state &= dev_priv->csr.allowed_dc_mask;
+
+ val = I915_READ(DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
+ DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
+ val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
+ val &= ~mask;
+ val |= state;
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
+}
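+
+/*
+ * Illustrative call sequence (a sketch; the real callers are the DC9/DC5/DC6
+ * helpers below):
+ *
+ *   gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);   // allow up to DC6
+ *   ...
+ *   gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);       // synchronous exit
+ */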
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC9\n");
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_power_sequencer_reset(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_disable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC9\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+ WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
+
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ for_each_power_well(dev_priv, power_well)
+ if (power_well->desc->id == power_well_id)
+ return power_well;
+
+ /*
+ * It's not feasible to add error checking code to the callers since
+ * this condition really shouldn't happen and it doesn't even make sense
+ * to abort things like display initialization sequences. Just return
+ * the first power well and hope the WARN gets reported so we can fix
+ * our driver.
+ */
+ WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ return &dev_priv->power_domains.power_wells[0];
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+
+ WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_csr_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC5\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = I915_READ(regs->bios);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ u32 drv_req = I915_READ(regs->driver);
+
+ if (!(drv_req & mask))
+ I915_WRITE(regs->driver, drv_req | mask);
+ I915_WRITE(regs->bios, bios_req & ~mask);
+ }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ power_well->desc->bxt.phy);
+ }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 tmp = I915_READ(DBUF_CTL);
+
+ WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+ (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+ "Unexpected DBuf power power state (0x%08x)\n", tmp);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct intel_cdclk_state cdclk_state = {};
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A, the other combo
+ * PHY's HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!dev_priv->csr.dmc_payload)
+ return;
+
+ if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(dev_priv);
+ else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(dev_priv);
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_A);
+ if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ i830_disable_pipe(dev_priv, PIPE_B);
+ i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ i830_pipes_power_well_enable(dev_priv, power_well);
+ else
+ i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ int pw_idx = power_well->desc->vlv.idx;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->vlv.idx;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+
+ WARN_ON(dev_priv->rawclk_freq == 0);
+
+ I915_WRITE(RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+}
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ u32 val = I915_READ(DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ I915_WRITE(DPLL(pipe), val);
+ }
+
+ vlv_init_display_clock_gating(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ i915_redisable_vga_power_on(dev_priv);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->drm.irq);
+
+ intel_power_sequencer_reset(dev_priv);
+
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
+
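+/* BITS_SET(val, bits): true iff every bit in @bits is also set in @val. */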
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_status = 0;
+ u32 phy_status_mask = 0xffffffff;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+ PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+ if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ phy_status &= phy_status_mask;
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (intel_wait_for_register(&dev_priv->uncore,
+ DISPLAY_PHY_STATUS,
+ phy_status_mask,
+ phy_status,
+ 10))
+ DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+ enum pipe pipe;
+ u32 tmp;
+
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ pipe = PIPE_A;
+ phy = DPIO_PHY0;
+ } else {
+ pipe = PIPE_C;
+ phy = DPIO_PHY1;
+ }
+
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (intel_wait_for_register(&dev_priv->uncore,
+ DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy),
+ PHY_POWERGOOD(phy),
+ 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+ * Force the non-existent CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ /* PHY is fully reset now, so we can enable the PHY state asserts */
+ dev_priv->chv_phy_assert[phy] = true;
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[phy])
+ return;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ vlv_dpio_get(dev_priv);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+ * (eg. for pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ WARN(actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = PIPE_A;
+ bool enabled;
+ u32 state, ctrl;
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ WARN_ON(ctrl << 16 != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = PIPE_A;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ return power_domains->async_put_domains[0] |
+ power_domains->async_put_domains[1];
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static bool
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+ return !WARN_ON(power_domains->async_put_domains[0] &
+ power_domains->async_put_domains[1]);
+}
+
+static bool
+__async_put_domains_state_ok(struct i915_power_domains *power_domains)
+{
+ enum intel_display_power_domain domain;
+ bool err = false;
+
+ err |= !assert_async_put_domain_masks_disjoint(power_domains);
+ err |= WARN_ON(!!power_domains->async_put_wakeref !=
+ !!__async_put_domains_mask(power_domains));
+
+ for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+ err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ return !err;
+}
+
+static void print_power_domains(struct i915_power_domains *power_domains,
+ const char *prefix, u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ for_each_power_domain(domain, mask)
+ DRM_DEBUG_DRIVER("%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+}
+
+static void
+print_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
+
+ print_power_domains(power_domains, "async_put_domains[0]",
+ power_domains->async_put_domains[0]);
+ print_power_domains(power_domains, "async_put_domains[1]",
+ power_domains->async_put_domains[1]);
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ if (!__async_put_domains_state_ok(power_domains))
+ print_async_put_domains_state(power_domains);
+}
+
+#else
+
+static void
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+}
+
+#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ return __async_put_domains_mask(power_domains);
+}
+
+static void
+async_put_domains_clear_domain(struct i915_power_domains *power_domains,
+ enum intel_display_power_domain domain)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
+ power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+}
+
+static bool
+intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool ret = false;
+
+ if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+ goto out_verify;
+
+ async_put_domains_clear_domain(power_domains, domain);
+
+ ret = true;
+
+ if (async_put_domains_mask(power_domains))
+ goto out_verify;
+
+ cancel_delayed_work(&power_domains->async_put_work);
+ intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
+ fetch_and_zero(&power_domains->async_put_wakeref));
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ return ret;
+}
+
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+
+ if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ return;
+
+ for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+ intel_power_well_get(dev_priv, power_well);
+
+ power_domains->domain_use_count[domain]++;
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_get_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return wakeref;
+}
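+
+/*
+ * Illustrative usage sketch, not part of the original patch (the AUX A
+ * domain is an arbitrary choice):
+ *
+ *	intel_wakeref_t wakeref;
+ *
+ *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
+ *	... access hardware in the AUX A domain ...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
+ */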
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain only if the domain
+ * is already enabled, ensuring that it and all its parents stay powered up;
+ * it returns 0 (no wakeref) otherwise. Users should only grab a reference to
+ * the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ if (!wakeref)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled) {
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ wakeref = 0;
+ }
+
+ return wakeref;
+}
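+
+/*
+ * Illustrative usage sketch, not part of the original patch: callers must
+ * handle the case where the domain is not already enabled:
+ *
+ *	wakeref = intel_display_power_get_if_enabled(dev_priv,
+ *						     POWER_DOMAIN_PIPE_A);
+ *	if (!wakeref)
+ *		return;   <- the domain is off, don't touch the hardware
+ *	... read pipe A registers ...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ */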
+
+static void
+__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ const char *name = intel_display_power_domain_str(domain);
+
+ power_domains = &dev_priv->power_domains;
+
+ WARN(!power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
+
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+ intel_power_well_put(dev_priv, power_well);
+}
+
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_put_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_display_power_put() instead.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
+
+static void
+queue_async_put_domains_work(struct i915_power_domains *power_domains,
+ intel_wakeref_t wakeref)
+{
+ WARN_ON(power_domains->async_put_wakeref);
+ power_domains->async_put_wakeref = wakeref;
+ WARN_ON(!queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
+}
+
+static void
+release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(power_domains, struct drm_i915_private,
+ power_domains);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
+
+ /*
+ * The caller must already hold a raw wakeref; upgrade it to a proper
+ * wakeref to make the state checker happy about the HW access during
+ * power well disabling.
+ */
+ assert_rpm_raw_wakeref_held(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
+
+ for_each_power_domain(domain, mask) {
+ /* Clear before put, so put's sanity check is happy. */
+ async_put_domains_clear_domain(power_domains, domain);
+ __intel_display_power_put_domain(dev_priv, domain);
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
+
+static void
+intel_display_power_put_async_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ power_domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
+ intel_wakeref_t old_work_wakeref = 0;
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * Bail out if all the domain refs pending to be released were grabbed
+ * by subsequent gets or a flush_work.
+ */
+ old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!old_work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ power_domains->async_put_domains[0]);
+
+	/* Requeue the work if more domains were put asynchronously in the meantime. */
+ if (power_domains->async_put_domains[1]) {
+ power_domains->async_put_domains[0] =
+ fetch_and_zero(&power_domains->async_put_domains[1]);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&new_work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (old_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ if (new_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+}
+
+/**
+ * intel_display_power_put_async - release a power domain reference asynchronously
+ * @i915: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get*() and schedules delayed work to power down the
+ * corresponding hardware block if this is the last reference.
+ */
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+
+ mutex_lock(&power_domains->lock);
+
+ if (power_domains->domain_use_count[domain] > 1) {
+ __intel_display_power_put_domain(i915, domain);
+
+ goto out_verify;
+ }
+
+ WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ /* Let a pending work requeue itself or queue a new one. */
+ if (power_domains->async_put_wakeref) {
+ power_domains->async_put_domains[1] |= BIT_ULL(domain);
+ } else {
+ power_domains->async_put_domains[0] |= BIT_ULL(domain);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(rpm, work_wakeref);
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
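+
+/*
+ * Illustrative usage sketch, not part of the original patch (assumes the
+ * intel_display_power_put_async() wrapper declared for this entry point):
+ *
+ *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_B);
+ *	... use the hardware ...
+ *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_B, wakeref);
+ *
+ * The domain then stays powered for at least the 100 msec work delay,
+ * which spares frequently used domains an immediate off/on cycle.
+ */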
+
+/**
+ * intel_display_power_flush_work - flushes the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Flushes any pending work that was scheduled by a preceding
+ * intel_display_power_put_async() call, completing the disabling of the
+ * corresponding power domains.
+ *
+ * Note that the work handler function may still be running after this
+ * function returns; to ensure that the work handler isn't running use
+ * intel_display_power_flush_work_sync() instead.
+ */
+void intel_display_power_flush_work(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t work_wakeref;
+
+ mutex_lock(&power_domains->lock);
+
+ work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ async_put_domains_mask(power_domains));
+ cancel_delayed_work(&power_domains->async_put_work);
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+}
+
+/**
+ * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Like intel_display_power_flush_work(), but also ensure that the work
+ * handler function is not running any more when this function returns.
+ */
+static void
+intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ intel_display_power_flush_work(i915);
+ cancel_delayed_work_sync(&power_domains->async_put_work);
+
+ verify_async_put_domains_state(power_domains);
+
+ WARN_ON(power_domains->async_put_wakeref);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+#endif
+
+#define I830_PIPES_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS ( \
+ ICL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+#define ICL_PW_2_POWER_DOMAINS ( \
+ ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - KVMR (HW control)
+ */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ ICL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+};
+
+static const struct i915_power_well_ops i830_pipes_power_well_ops = {
+ .sync_hw = i830_pipes_power_well_sync_hw,
+ .enable = i830_pipes_power_well_enable,
+ .disable = i830_pipes_power_well_disable,
+ .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i830_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "pipes",
+ .domains = I830_PIPES_POWER_DOMAINS,
+ .ops = &i830_pipes_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = gen9_dc_off_power_well_enable,
+ .disable = gen9_dc_off_power_well_disable,
+ .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+static const struct i915_power_well_desc hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .hsw.has_vga = true,
+ },
+ },
+};
+
+static const struct i915_power_well_desc bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ },
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .ops = &vlv_display_power_well_ops,
+ .id = VLV_DISP_PW_DISP2D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ },
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
+ },
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
+ },
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
+ },
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
+ },
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
+ },
+};
+
+static const struct i915_power_well_desc chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ /*
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
+ */
+ .domains = CHV_DISPLAY_POWER_DOMAINS,
+ .ops = &chv_pipe_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
+ },
+ {
+ .name = "dpio-common-d",
+ .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ .id = CHV_DISP_PW_DPIO_CMN_D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ },
+ },
+};
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+ bool ret;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+ ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
+
+ return ret;
+}
+
+static const struct i915_power_well_desc skl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "MISC IO power well",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_MISC_IO,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A/E IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
+ },
+};
+
+static const struct i915_power_well_desc bxt_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
+ },
+};
+
+static const struct i915_power_well_desc glk_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
+ },
+ {
+ .name = "dpio-common-b",
+ .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
+ },
+ {
+ .name = "dpio-common-c",
+ .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = GLK_DISP_PW_DPIO_CMN_C,
+ {
+ .bxt.phy = DPIO_PHY2,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "DDI A IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+};
+
+static const struct i915_power_well_desc cnl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO power well",
+ .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO power well",
+ .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
+ },
+ {
+ .name = "DDI F IO power well",
+ .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
+ },
+ },
+ {
+ .name = "AUX F",
+ .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
+ },
+ },
+};
+
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_combo_phy_aux_power_well_enable,
+ .disable = icl_combo_phy_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_tc_phy_aux_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+static const struct i915_power_well_desc icl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+ },
+ },
+ {
+ .name = "DDI E IO",
+ .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
+ },
+ },
+ {
+ .name = "DDI F IO",
+ .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX E",
+ .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX F",
+ .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+ },
+};
+
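+/*
+ * Resolve the i915.disable_power_well module parameter: honor an explicit
+ * 0/1 request, and treat negative values (the -1 auto default) as allowing
+ * unused power wells to be disabled.
+ */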
+static int
+sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+ int disable_power_well)
+{
+ if (disable_power_well >= 0)
+ return !!disable_power_well;
+
+ return 1;
+}
+
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
+{
+ u32 mask;
+ int requested_dc;
+ int max_dc;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ max_dc = 2;
+ /*
+ * DC9 has a separate HW flow from the rest of the DC states,
+ * not depending on the DMC firmware. It's needed by system
+ * suspend/resume, so allow it unconditionally.
+ */
+ mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ max_dc = 1;
+ mask = DC_STATE_EN_DC9;
+ } else {
+ max_dc = 0;
+ mask = 0;
+ }
+
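+	/*
+	 * DC states depend on unused power wells being allowed to power
+	 * down, so allow no DC state when disable_power_well is off.
+	 */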
+ if (!i915_modparams.disable_power_well)
+ max_dc = 0;
+
+ if (enable_dc >= 0 && enable_dc <= max_dc) {
+ requested_dc = enable_dc;
+ } else if (enable_dc == -1) {
+ requested_dc = max_dc;
+ } else if (enable_dc > max_dc && enable_dc <= 2) {
+ DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
+ requested_dc = max_dc;
+ } else {
+ DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ requested_dc = max_dc;
+ }
+
+ if (requested_dc > 1)
+ mask |= DC_STATE_EN_UPTO_DC6;
+ if (requested_dc > 0)
+ mask |= DC_STATE_EN_UPTO_DC5;
+
+ DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+ return mask;
+}
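+
+/*
+ * Worked example of the above (illustrative): on gen11 with the default
+ * enable_dc=-1 and power well disabling allowed, max_dc = 2 and
+ * requested_dc = 2, so the returned mask is
+ * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
+ */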
+
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc *power_well_descs,
+ int power_well_count)
+{
+ u64 power_well_ids = 0;
+ int i;
+
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for (i = 0; i < power_well_count; i++) {
+ enum i915_power_well_id id = power_well_descs[i].id;
+
+ power_domains->power_wells[i].desc = &power_well_descs[i];
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
+
+ WARN_ON(id >= sizeof(power_well_ids) * 8);
+ WARN_ON(power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+
+ return 0;
+}
+
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int err;
+
+ i915_modparams.disable_power_well =
+ sanitize_disable_power_well_option(dev_priv,
+ i915_modparams.disable_power_well);
+ dev_priv->csr.allowed_dc_mask =
+ get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+
+ BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
+
+ mutex_init(&power_domains->lock);
+
+ INIT_DELAYED_WORK(&power_domains->async_put_work,
+ intel_display_power_put_async_work);
+
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_GEN(dev_priv, 11)) {
+ err = set_power_wells(power_domains, icl_power_wells);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, cnl_power_wells);
+
+ /*
+ * DDI and AUX IO power wells are enabled for all ports
+ * regardless of their presence or use, so, to avoid
+ * timeouts, drop the DDI F and AUX F wells from the
+ * list on SKUs without port F.
+ */
+ if (!IS_CNL_WITH_PORT_F(dev_priv))
+ power_domains->power_well_count -= 2;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ err = set_power_wells(power_domains, glk_power_wells);
+ } else if (IS_BROXTON(dev_priv)) {
+ err = set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEN9_BC(dev_priv)) {
+ err = set_power_wells(power_domains, skl_power_wells);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ err = set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_BROADWELL(dev_priv)) {
+ err = set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
+ err = set_power_wells(power_domains, hsw_power_wells);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ err = set_power_wells(power_domains, vlv_power_wells);
+ } else if (IS_I830(dev_priv)) {
+ err = set_power_wells(power_domains, i830_power_wells);
+ } else {
+ err = set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return err;
+}
+
+/**
+ * intel_power_domains_cleanup - clean up power domains resources
+ * @dev_priv: i915 device instance
+ *
+ * Release any resources acquired by intel_power_domains_init()
+ */
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->power_domains.power_wells);
+}
+
+static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(dev_priv, power_well) {
+ power_well->desc->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled =
+ power_well->desc->ops->is_enabled(dev_priv, power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
+{
+ u32 val, status;
+
+ val = I915_READ(reg);
+ val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(10);
+
+ status = I915_READ(reg) & DBUF_POWER_STATE;
+ if ((enable && !status) || (!enable && status)) {
+ DRM_ERROR("DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
+ return false;
+ }
+ return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
+
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return 1;
+ return 2;
+}
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
+{
+ const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ bool ret;
+
+ if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+ DRM_ERROR("Invalid number of dbuf slices requested\n");
+ return;
+ }
+
+ if (req_slices == hw_enabled_slices || req_slices == 0)
+ return;
+
+ if (req_slices > hw_enabled_slices)
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+ else
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+ if (ret)
+ dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+}
+
+static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+ else
+ /*
+ * FIXME: for now pretend that we only have 1 slice, see
+ * intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
+static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power disable timeout!\n");
+ else
+ /*
+ * FIXME: for now pretend that the first slice is always
+ * enabled, see intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
+
+ I915_WRITE(MBUS_ABOX_CTL, val);
+}
+
+static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 val = I915_READ(LCPLL_CTL);
+
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+
+ if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
+ DRM_ERROR("LCPLL not using non-SSC reference\n");
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc)
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ "Display power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ if (IS_HASWELL(dev_priv))
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ "PCH GTC enabled\n");
+
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+{
+ if (IS_HASWELL(dev_priv)) {
+ if (sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ } else {
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
+ }
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ u32 val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+ ndelay(100);
+
+ if (wait_for((hsw_read_dcomp(dev_priv) &
+ D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /*
+ * Make sure we're not in PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To prevent PC8 state, just enable force_wake.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually
+ * handle the rest: disable the interrupts and clocks, and switch the LCPLL
+ * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we
+ * can hard hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev_priv);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ intel_init_pch_refclk(dev_priv);
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+}
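+
+/*
+ * Pairing sketch (editor's addition): hsw_enable_pc8() and
+ * hsw_disable_pc8() bracket the period in which the device may enter
+ * package C8+, e.g. around the HSW/BDW runtime suspend/resume cycle.
+ * A hypothetical caller:
+ *
+ *	static int example_runtime_suspend(struct drm_i915_private *i915)
+ *	{
+ *		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ *			hsw_enable_pc8(i915); // undone by hsw_disable_pc8()
+ *		return 0;
+ *	}
+ */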
+
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ val = I915_READ(reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ I915_WRITE(reg, val);
+}
+
+static void skl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* enable PCH reset handshake */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* enable PG1 and Misc I/O */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+ /* disable PG1 and Misc I/O */
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * BSpec says to keep the MISC IO power well enabled here, only
+ * remove our request for power well 1.
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to the DMC's own request for it.
+ */
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Move the handshake programming to the initialization sequence;
+ * previously it was left up to the BIOS.
+ */
+ intel_pch_reset_handshake(dev_priv, false);
+
+ /* Enable PG1 */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+
+ /*
+ * Disable PW1 (PG1).
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to the DMC's own request for it.
+ */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH Reset Handshake */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2-3. */
+ intel_combo_phy_init(dev_priv);
+
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 5. Enable CD clock */
+ intel_cdclk_init(dev_priv);
+
+ /* 6. Enable DBUF */
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ gen9_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ intel_cdclk_uninit(dev_priv);
+
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+
+ /* 5. */
+ intel_combo_phy_uninit(dev_priv);
+}
+
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH reset handshake. */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2. Initialize all combo phys */
+ intel_combo_phy_init(dev_priv);
+
+ /*
+ * 3. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 4. Enable CDCLK. */
+ intel_cdclk_init(dev_priv);
+
+ /* 5. Enable DBUF. */
+ icl_dbuf_enable(dev_priv);
+
+ /* 6. Setup MBUS. */
+ icl_mbus_init(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ icl_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ intel_cdclk_uninit(dev_priv);
+
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 5. */
+ intel_combo_phy_uninit(dev_priv);
+}
+
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+
+ /*
+ * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+ * workaround never ever read DISPLAY_PHY_CONTROL, and
+ * instead maintain a shadow copy ourselves. Use the actual
+ * power well state and lane status to reconstruct the
+ * expected initial value.
+ */
+ dev_priv->chv_phy_control =
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+ /*
+ * If all lanes are disabled we leave the override disabled
+ * with all power down bits cleared to match the state we
+ * would use after disabling the port. Otherwise enable the
+ * override and set the lane powerdown bits according to the
+ * current lane status.
+ */
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+ u32 status = I915_READ(DPLL(PIPE_A));
+ unsigned int mask;
+
+ mask = status & DPLL_PORTB_READY_MASK;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+ mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+
+ dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+ } else {
+ dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+ }
+
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+ u32 status = I915_READ(DPIO_PHY_STATUS);
+ unsigned int mask;
+
+ mask = status & DPLL_PORTD_READY_MASK;
+
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+
+ dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ } else {
+ dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+
+ /* If the display might already be active, skip this */
+ if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->desc->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ cmn->desc->ops->disable(dev_priv, cmn);
+}
+
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ vlv_punit_get(dev_priv);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(dev_priv);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ WARN(!pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @i915: i915 device instance
+ * @resume: whether we're called from a resume code path
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power wells belonging to the INIT power domain. Power wells in other
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_fini_hw().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ power_domains->initializing = true;
+
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_CANNONLAKE(i915)) {
+ cnl_display_core_init(i915, resume);
+ } else if (IS_GEN9_BC(i915)) {
+ skl_display_core_init(i915, resume);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_init(i915, resume);
+ } else if (IS_CHERRYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ chv_phy_control_init(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
+ } else if (IS_VALLEYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
+ } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
+ hsw_assert_cdclk(i915);
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ } else if (IS_IVYBRIDGE(i915)) {
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ }
+
+ /*
+ * Keep all power wells enabled for any dependent HW access during
+ * initialization and to make sure we keep BIOS-enabled display HW
+ * resources powered until display HW readout is complete. We drop
+ * this reference in intel_power_domains_enable().
+ */
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ /*
+ * Disable power well support (i.e. keep the wells always on) if the
+ * user asked so.
+ */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_sync_hw(i915);
+
+ power_domains->initializing = false;
+}
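+
+/*
+ * Illustrative ordering sketch (editor's addition, simplified): per the
+ * kernel-doc above, the entry points are designed to be called in a
+ * strict sequence from the driver load/unload paths, roughly:
+ *
+ *	intel_power_domains_init(i915);
+ *	intel_power_domains_init_hw(i915, false);
+ *	...display HW readout...
+ *	intel_power_domains_enable(i915);
+ *	...normal operation...
+ *	intel_power_domains_disable(i915);
+ *	intel_power_domains_fini_hw(i915);
+ *	intel_power_domains_cleanup(i915);
+ */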
+
+/**
+ * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * @i915: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
+
+ /* Remove the refcount we took to keep power well support disabled. */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_display_power_flush_work_sync(i915);
+
+ intel_power_domains_verify_state(i915);
+
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Enable the on-demand enabling/disabling of the display power wells. Note
+ * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
+ * toggled only at specific points of the display modeset sequence, thus they
+ * are not affected by the intel_power_domains_enable()/disable() calls. The
+ * purpose of these functions is to keep the rest of the power wells enabled
+ * until the end
+ * of display HW readout (which will acquire the power references reflecting
+ * the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Disable the on-demand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_suspend - suspend power domain state
+ * @i915: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
+ *
+ * This function prepares the hardware power domain state before entering
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
+ */
+void intel_power_domains_suspend(struct drm_i915_private *i915,
+ enum i915_drm_suspend_mode suspend_mode)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&power_domains->wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ /*
+ * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+ * support, don't manually deinit the power domains. This also means the
+ * CSR/DMC firmware will stay active, it will power down any HW
+ * resources as required and also enable deeper system power states
+ * that would be blocked if the firmware was inactive.
+ */
+ if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ suspend_mode == I915_DRM_SUSPEND_IDLE &&
+ i915->csr.dmc_payload) {
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+ return;
+ }
+
+ /*
+ * Even if power well support was disabled, we still want to disable
+ * power wells if power domains must be deinitialized for suspend.
+ */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+
+ if (INTEL_GEN(i915) >= 11)
+ icl_display_core_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_display_core_uninit(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_display_core_uninit(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_display_core_uninit(i915);
+
+ power_domains->display_core_suspended = true;
+}
+
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @i915: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ if (power_domains->display_core_suspended) {
+ intel_power_domains_init_hw(i915, true);
+ power_domains->display_core_suspended = false;
+ } else {
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ }
+
+ intel_power_domains_verify_state(i915);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_well *power_well;
+
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%-25s %d\n",
+ power_well->desc->name, power_well->count);
+
+ for_each_power_domain(domain, power_well->desc->domains)
+ DRM_DEBUG_DRIVER(" %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+ }
+}
+
+/**
+ * intel_power_domains_verify_state - verify the HW/SW state for all power wells
+ * @i915: i915 device instance
+ *
+ * Verify if the reference count of each power well matches its HW enabled
+ * state and the total refcount of the domains it belongs to. This must be
+ * called after modeset HW state sanitization, which is responsible for
+ * acquiring reference counts for any power wells in use and disabling the
+ * ones left on by BIOS but not required by any active output.
+ */
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_well *power_well;
+ bool dump_domain_info;
+
+ mutex_lock(&power_domains->lock);
+
+ verify_async_put_domains_state(power_domains);
+
+ dump_domain_info = false;
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+ int domains_count;
+ bool enabled;
+
+ enabled = power_well->desc->ops->is_enabled(i915, power_well);
+ if ((power_well->count || power_well->desc->always_on) !=
+ enabled)
+ DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->desc->name,
+ power_well->count, enabled);
+
+ domains_count = 0;
+ for_each_power_domain(domain, power_well->desc->domains)
+ domains_count += power_domains->domain_use_count[domain];
+
+ if (power_well->count != domains_count) {
+ DRM_ERROR("power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->desc->name, power_well->count,
+ domains_count);
+ dump_domain_info = true;
+ }
+ }
+
+ if (dump_domain_info) {
+ static bool dumped;
+
+ if (!dumped) {
+ intel_power_domains_dump_info(i915);
+ dumped = true;
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
new file mode 100644
index 000000000000..ff57b0a7fe59
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_H__
+#define __INTEL_DISPLAY_POWER_H__
+
+#include "intel_display.h"
+#include "intel_runtime_pm.h"
+#include "i915_reg.h"
+
+struct drm_i915_private;
+struct intel_encoder;
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
+ POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
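+
+/*
+ * Editor's note: these helpers rely on the corresponding enum values
+ * above being laid out contiguously, e.g. POWER_DOMAIN_PIPE(PIPE_B)
+ * evaluates to POWER_DOMAIN_PIPE_B.
+ */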
+
+struct i915_power_well;
+
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
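+
+/*
+ * Minimal example (editor's sketch, hypothetical names): an always-on
+ * well only needs to report its state; the remaining hooks can be
+ * no-ops:
+ *
+ *	static void example_noop(struct drm_i915_private *dev_priv,
+ *				 struct i915_power_well *power_well)
+ *	{
+ *	}
+ *
+ *	static bool example_always_enabled(struct drm_i915_private *dev_priv,
+ *					   struct i915_power_well *power_well)
+ *	{
+ *		return true;
+ *	}
+ *
+ *	static const struct i915_power_well_ops example_always_on_ops = {
+ *		.sync_hw = example_noop,
+ *		.enable = example_noop,
+ *		.disable = example_noop,
+ *		.is_enabled = example_always_enabled,
+ *	};
+ */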
+
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+/* Power well structure for haswell */
+struct i915_power_well_desc {
+ const char *name;
+ bool always_on;
+ u64 domains;
+ /* unique identifier for this power well */
+ enum i915_power_well_id id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
+ union {
+ struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ const struct i915_power_well_regs *regs;
+ /*
+ * request/status flag index in the power well
+ * control/status registers.
+ */
+ u8 idx;
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
+ } hsw;
+ };
+ const struct i915_power_well_ops *ops;
+};
+
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+};
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool initializing;
+ bool display_core_suspended;
+ int power_well_count;
+
+ intel_wakeref_t wakeref;
+
+ struct mutex lock;
+ int domain_use_count[POWER_DOMAIN_NUM];
+
+ struct delayed_work async_put_work;
+ intel_wakeref_t async_put_wakeref;
+ u64 async_put_domains[2];
+
+ struct i915_power_well *power_wells;
+};
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
+
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
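+
+/*
+ * Usage sketch (editor's addition): walking only the wells backing a
+ * given domain mask, in power-on order:
+ *
+ *	struct i915_power_well *well;
+ *
+ *	for_each_power_domain_well(dev_priv, well, BIT_ULL(POWER_DOMAIN_VGA))
+ *		DRM_DEBUG_KMS("%s backs VGA\n", well->desc->name);
+ */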
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+
+int intel_power_domains_init(struct drm_i915_private *dev_priv);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+void intel_display_power_flush_work(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, wakeref);
+}
+#else
+static inline void
+intel_display_power_put(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ intel_display_power_put_unchecked(i915, domain);
+}
+
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, -1);
+}
+#endif
+
+#define with_intel_display_power(i915, domain, wf) \
+ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
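+
+/*
+ * Usage sketch (editor's addition): the macro scopes a display power
+ * reference around a statement or block, putting the reference back
+ * asynchronously on exit:
+ *
+ *	intel_wakeref_t wakeref;
+ *	u32 val = 0;
+ *
+ *	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ *		val = intel_uncore_read(&i915->uncore, DBUF_CTL);
+ */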
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 560274d1c50b..4336df46fe78 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>
+
#include <asm/byteorder.h>
#include <drm/drm_atomic_helper.h>
@@ -41,18 +42,27 @@
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sideband.h"
+#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
@@ -206,14 +216,17 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ intel_wakeref_t wakeref;
u32 lane_info;
if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
return 4;
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_info = 0;
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
switch (lane_info) {
default:
@@ -319,6 +332,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
enum port port = dig_port->base.port;
if (intel_port_is_combophy(dev_priv, port) &&
+ !IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -1068,13 +1082,13 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
u32 status;
bool done;
-#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
/* just trace the final value */
@@ -1207,11 +1221,15 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
- intel_wakeref_t wakeref;
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(intel_dig_port);
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -1221,7 +1239,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- wakeref = pps_lock(intel_dp);
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -1235,13 +1254,13 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- pm_qos_update_request(&dev_priv->pm_qos, 0);
+ pm_qos_update_request(&i915->pm_qos, 0);
intel_dp_check_edp(intel_dp);
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = I915_READ_NOTRACE(ch_ctl);
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -1251,7 +1270,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
if (try == 3) {
static u32 last_status = -1;
- const u32 status = I915_READ(ch_ctl);
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
@@ -1280,21 +1299,23 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, send_ctl);
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
* 400us delay required for errors and timeouts
@@ -1357,17 +1378,18 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
out:
- pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
- pps_unlock(intel_dp, wakeref);
+ pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
return ret;
}
@@ -1832,6 +1854,19 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
+{
+ /*
+ * The bpp value was assumed to be for RGB format. For YCbCr 4:2:0
+ * output, the number of bytes per pixel is half that of the RGB
+ * format.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ bpp /= 2;
+
+ return bpp;
+}
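+
+/*
+ * Worked example (editor's note): an 8 bpc RGB stream has pipe_bpp = 24,
+ * so for YCbCr 4:2:0 output the bpp used for link M/N computation halves
+ * to 12.
+ */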
+
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
@@ -2075,6 +2110,36 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int
+intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ int ret;
+
+ if (!drm_mode_is_420_only(info, adjusted_mode) ||
+ !intel_dp_get_colorimetry_status(intel_dp) ||
+ !connector->ycbcr_420_allowed)
+ return 0;
+
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+
+ /* YCBCR 420 output conversion needs a scaler */
+ ret = skl_update_scaler_crtc(crtc_state);
+ if (ret) {
+ DRM_DEBUG_KMS("Scaler allocation for output failed\n");
+ return ret;
+ }
+
+ intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
+
+ return 0;
+}
+
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -2114,7 +2179,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
- int ret, output_bpp;
+ int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2122,6 +2187,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+ else
+ ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
+ pipe_config);
+
+ if (ret)
+ return ret;
pipe_config->has_drrs = false;
if (IS_G4X(dev_priv) || port == PORT_A)
@@ -2169,7 +2240,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (pipe_config->dsc_params.compression_enable)
output_bpp = pipe_config->dsc_params.compressed_bpp;
else
- output_bpp = pipe_config->pipe_bpp;
+ output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
@@ -3148,12 +3219,12 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
intel_dp_link_down(encoder, old_crtc_state);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
static void
@@ -3927,9 +3998,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev_priv)))
- return;
-
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -4041,6 +4109,16 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ u8 dprx = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
/*
@@ -4351,6 +4429,96 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static void
+intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp vsc_sdp = {};
+
+ /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
+ vsc_sdp.sdp_header.HB0 = 0;
+ vsc_sdp.sdp_header.HB1 = 0x7;
+
+ /*
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc_sdp.sdp_header.HB2 = 0x5;
+
+ /*
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication (HB2 = 05h).
+ */
+ vsc_sdp.sdp_header.HB3 = 0x13;
+
+ /*
+ * DB16[7:4]: Pixel Encoding, YCbCr 420 = 3h.
+ * DB16[3:0]: Colorimetry Format, ITU-R BT.601 = 0h, ITU-R BT.709 = 1h.
+ * See DP 1.4a spec, Table 2-120.
+ */
+ vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4, YCbCr 420 */
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+
+ /*
+ * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
+ * the following Component Bit Depth values are defined:
+ * 001b = 8bpc.
+ * 010b = 10bpc.
+ * 011b = 12bpc.
+ * 100b = 16bpc.
+ */
+ switch (crtc_state->pipe_bpp) {
+ case 24: /* 8bpc */
+ vsc_sdp.db[17] = 0x1;
+ break;
+ case 30: /* 10bpc */
+ vsc_sdp.db[17] = 0x2;
+ break;
+ case 36: /* 12bpc */
+ vsc_sdp.db[17] = 0x3;
+ break;
+ case 48: /* 16bpc */
+ vsc_sdp.db[17] = 0x4;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
+
+ /*
+ * Dynamic Range (Bit 7)
+ * 0 = VESA range, 1 = CTA range.
+ * all YCbCr are always limited range
+ */
+ vsc_sdp.db[17] |= 0x80;
+
+ /*
+ * Content Type (Bits 2:0)
+ * 000b = Not defined.
+ * 001b = Graphics.
+ * 010b = Photo.
+ * 011b = Video.
+ * 100b = Game
+ * All other values are RESERVED.
+ * Note: See CTA-861-G for the definition and expected
+ * processing by a stream sink for the above content types.
+ */
+ vsc_sdp.db[18] = 0;
+
+ intel_dig_port->write_infoframe(&intel_dig_port->base,
+ crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
+}
+
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return;
+
+ intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+}
+
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
@@ -4829,15 +4997,15 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
u8 *dpcd = intel_dp->dpcd;
u8 type;
+ if (WARN_ON(intel_dp_is_edp(intel_dp)))
+ return connector_status_connected;
+
if (lspcon->active)
lspcon_resume(lspcon);
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
- if (intel_dp_is_edp(intel_dp))
- return connector_status_connected;
-
/* if there's no downstream port, we're done */
if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
@@ -5219,12 +5387,15 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
u32 dpsp;
/*
- * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+ * Complain if we got a legacy port HPD, but VBT didn't mark the port as
* legacy. Treat the port as legacy from now on.
*/
- if (WARN_ON(!intel_dig_port->tc_legacy_port &&
- I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+ if (!intel_dig_port->tc_legacy_port &&
+ I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
+ DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
+ port_name(port));
intel_dig_port->tc_legacy_port = true;
+ }
is_legacy = intel_dig_port->tc_legacy_port;
/*
@@ -5276,7 +5447,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
*
* Return %true if port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct intel_encoder *encoder)
+static bool __intel_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -5306,6 +5477,18 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return false;
}
+bool intel_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool is_connected = false;
+ intel_wakeref_t wakeref;
+
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ is_connected = __intel_digital_port_connected(encoder);
+
+ return is_connected;
+}
+
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
@@ -5359,16 +5542,11 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- enum intel_display_power_domain aux_domain =
- intel_aux_power_domain(dig_port);
- intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- wakeref = intel_display_power_get(dev_priv, aux_domain);
-
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@@ -5432,10 +5610,8 @@ intel_dp_detect(struct drm_connector *connector,
int ret;
ret = intel_dp_retrain_link(encoder, ctx);
- if (ret) {
- intel_display_power_put(dev_priv, aux_domain, wakeref);
+ if (ret)
return ret;
- }
}
/*
@@ -5457,7 +5633,6 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain, wakeref);
return status;
}
@@ -6235,6 +6410,10 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !intel_dp_is_edp(intel_dp))
+ return;
+
with_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
@@ -6278,9 +6457,6 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum irqreturn ret = IRQ_NONE;
- intel_wakeref_t wakeref;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -6303,9 +6479,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
return IRQ_NONE;
}
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
-
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
/*
@@ -6317,7 +6490,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
- goto put_power;
+
+ return IRQ_NONE;
}
}
@@ -6327,17 +6501,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
handled = intel_dp_short_pulse(intel_dp);
if (!handled)
- goto put_power;
+ return IRQ_NONE;
}
- ret = IRQ_HANDLED;
-
-put_power:
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port),
- wakeref);
-
- return ret;
+ return IRQ_HANDLED;
}
/* check the VBT to see whether the eDP is on another port */
@@ -7182,18 +7349,20 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
- /* intel_dp vfuncs */
- if (HAS_DDI(dev_priv))
- intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
-
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port))
+ if (intel_dp_is_port_edp(dev_priv, port)) {
+ /*
+ * Currently we don't support eDP on TypeC ports, although in
+ * theory it could work on TypeC legacy ports.
+ */
+ WARN_ON(intel_port_is_tc(dev_priv, port));
type = DRM_MODE_CONNECTOR_eDP;
- else
+ } else {
type = DRM_MODE_CONNECTOR_DisplayPort;
+ }
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
@@ -7223,6 +7392,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ connector->ycbcr_420_allowed = true;
+
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_dp_aux_init(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 5e9e8d13de6e..da70b1a41c83 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -108,6 +108,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 357136f17f85..7ded95a334db 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,6 +22,7 @@
*
*/
+#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
new file mode 100644
index 000000000000..ed60c2858967
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_BACKLIGHT_H__
+#define __INTEL_DP_AUX_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DP_AUX_BACKLIGHT_H__ */
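[Editor's note] Every header introduced by this series follows the same
minimal shape: SPDX tag, include guard, forward declarations instead of
heavy includes, and only the prototypes other files actually need. A
generic template (names hypothetical):

/* SPDX-License-Identifier: MIT */
#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__

struct some_type;	/* forward declaration avoids pulling in a big header */

int example_init(struct some_type *t);

#endif /* __EXAMPLE_H__ */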
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 54b069333e2f..9b1fccea966b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -22,6 +22,7 @@
*/
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
#include "intel_drv.h"
static void
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
new file mode 100644
index 000000000000..174566adcc92
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_LINK_TRAINING_H__
+#define __INTEL_DP_LINK_TRAINING_H__
+
+struct intel_dp;
+
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8839eaea8371..60652ebbdf61 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -28,10 +28,13 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
@@ -148,9 +151,10 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct intel_connector *intel_connector =
@@ -160,7 +164,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr;
int ret;
- ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+ ret = intel_digital_connector_atomic_check(connector, state);
if (ret)
return ret;
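[Editor's note] Note the signature change above: the connector
->atomic_check() hook now receives the full drm_atomic_state, and both the
old and new connector states are looked up through the drm_atomic
accessors. A minimal sketch of the pattern (driver-specific checks elided):

#include <drm/drm_atomic.h>

static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	/* Compare old vs. new state; add affected CRTCs to @state as needed. */
	return 0;
}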
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
new file mode 100644
index 000000000000..1470c6e0514b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_MST_H__
+#define __INTEL_DP_MST_H__
+
+struct intel_digital_port;
+
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+
+#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index ab4ac7158b79..7ccf7f3974db 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,8 +21,11 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "intel_dp.h"
+#include "display/intel_dp.h"
+
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_sideband.h"
/**
* DOC: DPIO
@@ -648,7 +651,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 val;
int i;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
@@ -729,8 +732,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
- mutex_unlock(&dev_priv->sb_lock);
-
+ vlv_dpio_put(dev_priv);
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
@@ -800,7 +802,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_powergate_lanes(encoder, true, lane_mask);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, crtc_state, true);
@@ -855,7 +857,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -870,7 +872,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
int data, i, stagger;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
@@ -935,7 +937,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, crtc_state, false);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
@@ -956,7 +958,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
@@ -969,7 +971,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
/*
* Leave the power down bit cleared for at least one
@@ -993,7 +995,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
+
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
@@ -1006,7 +1009,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
@@ -1019,7 +1023,8 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
+
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1033,7 +1038,8 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -1047,7 +1053,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1063,7 +1069,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
@@ -1075,8 +1081,8 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
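[Editor's note] All of the mutex_lock(&dev_priv->sb_lock)/mutex_unlock()
pairs in this file become vlv_dpio_get()/vlv_dpio_put() from the new
intel_sideband header, hiding the lock behind a get/put interface so the
sideband code can grow extra bookkeeping without touching every caller. A
sketch of the shape of such wrappers (an assumption-laden simplification:
the real helpers also manage IOSF sideband power state):

static void example_dpio_get(struct drm_i915_private *i915)
{
	/* The real helper also takes an IOSF sideband power reference. */
	mutex_lock(&i915->sb_lock);
}

static void example_dpio_put(struct drm_i915_private *i915)
{
	mutex_unlock(&i915->sb_lock);
	/* ...and the real helper drops the sideband power reference here. */
}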
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
new file mode 100644
index 000000000000..f418aab90b7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DPIO_PHY_H__
+#define __INTEL_DPIO_PHY_H__
+
+#include <linux/types.h>
+
+enum dpio_channel;
+enum dpio_phy;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e01c057ce50b..2d4e7b9a7b9d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,6 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_dpio_phy.h"
+#include "intel_dpll_mgr.h"
#include "intel_drv.h"
/**
@@ -349,7 +351,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -358,7 +360,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(id));
hw_state->fp1 = I915_READ(PCH_FP1(id));
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & DPLL_VCO_ENABLE;
}
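[Editor's note] This hunk and the matching ones below retag the readout
power reference from POWER_DOMAIN_PLLS to POWER_DOMAIN_DISPLAY_CORE while
keeping the same conditional-grab pattern: take the domain only if it is
already enabled, read the registers, then drop the reference. Schematically:

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;	/* domain is off, so the PLL cannot be on */

	/* ... read the PLL registers into hw_state ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);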
@@ -452,7 +454,7 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
"fp0: 0x%x, fp1: 0x%x\n",
@@ -517,14 +519,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & WRPLL_PLL_ENABLE;
}
@@ -537,14 +539,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & SPLL_PLL_ENABLE;
}
@@ -773,7 +775,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+ val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
@@ -837,7 +839,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state,
return NULL;
crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+ SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
@@ -854,7 +856,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state,
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
hw_state->wrpll, hw_state->spll);
@@ -1002,7 +1004,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1023,7 +1025,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1039,7 +1041,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1056,7 +1058,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1423,7 +1425,7 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
@@ -1600,7 +1602,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1658,7 +1660,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1855,7 +1857,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
}
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
@@ -1879,27 +1881,6 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.get_hw_state = bxt_ddi_pll_get_hw_state,
};
-static void intel_ddi_pll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_GEN(dev_priv) < 9) {
- u32 val = I915_READ(LCPLL_CTL);
-
- /*
- * The LCPLL register should be turned on by the BIOS. For now
- * let's just check its state and print errors in case
- * something is wrong. Don't even try to turn it on.
- */
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
- }
-}
-
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
@@ -1907,7 +1888,7 @@ struct intel_dpll_mgr {
struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state);
+ const struct intel_dpll_hw_state *hw_state);
};
static const struct dpll_info pch_plls[] = {
@@ -2106,7 +2087,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2126,7 +2107,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -2390,7 +2371,7 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
}
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: "
"cfgcr0: 0x%x, cfgcr1: 0x%x\n",
@@ -2741,11 +2722,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
}
if (use_ssc) {
- tmp = (u64)dco_khz * 47 * 32;
+ tmp = mul_u32_u32(dco_khz, 47 * 32);
do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp;
- tmp = (u64)dco_khz * 1000;
+ tmp = mul_u32_u32(dco_khz, 1000);
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else {
ssc_stepsize = 0;
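[Editor's note] mul_u32_u32() from <linux/math64.h> returns the full 64-bit
product of two 32-bit operands; switching to it lets 32-bit builds use a
single 32x32->64 multiply instead of the full 64x64 multiply implied by the
(u64) cast. Its generic fallback is essentially:

/* Generic fallback; architectures may provide a cheaper override. */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}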
@@ -2881,7 +2862,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2928,7 +2909,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -2943,7 +2924,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2956,7 +2937,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -3190,7 +3171,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
@@ -3303,10 +3284,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
mutex_init(&dev_priv->dpll_lock);
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-
- /* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev_priv))
- intel_ddi_pll_init(dev);
}
/**
@@ -3364,7 +3341,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
* Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
if (dev_priv->dpll_mgr) {
dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index bd8124cc81ed..d0570414f3d1 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -25,6 +25,10 @@
#ifndef _INTEL_DPLL_MGR_H_
#define _INTEL_DPLL_MGR_H_
+#include <linux/types.h>
+
+#include "intel_display.h"
+
/* FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
typeof(a) __a = (a); \
@@ -32,13 +36,13 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
+struct drm_atomic_state;
+struct drm_device;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
-
struct intel_shared_dpll;
-struct intel_dpll_mgr;
/**
* enum intel_dpll_id - possible DPLL ids
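[Editor's note] For reference, the abs_diff() macro shown in the hunk above
evaluates each argument exactly once and returns the absolute difference;
the (void)(&__a == &__b) line is a compile-time type check, since comparing
pointers to incompatible types draws a compiler warning. For example:

	u32 clock = 148500, target = 148352;
	u32 err = abs_diff(clock, target);	/* 148 */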
@@ -289,7 +293,7 @@ struct intel_shared_dpll {
/**
* @state:
*
- * Store the state for the pll, including the its hw state
+ * Store the state for the pll, including its hw state
* and CRTCs using it.
*/
struct intel_shared_dpll_state state;
@@ -339,7 +343,7 @@ void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state);
+ const struct intel_dpll_hw_state *hw_state);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..5fec02aceaed 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 705a609050c0..6d20434636cd 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -28,6 +28,9 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
+
/* Dual Link support */
#define DSI_DUAL_LINK_NONE 0
#define DSI_DUAL_LINK_FRONT_BACK 1
@@ -151,6 +154,9 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
return enc_to_intel_dsi(&encoder->base)->ports;
}
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
@@ -166,6 +172,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -192,5 +199,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
+void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 150a156f3b1e..8c33262cb0b2 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -23,11 +23,13 @@
* Author: Deepak M <m.deepak at intel.com>
*/
+#include <drm/drm_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "i915_drv.h"
-#include <video/mipi_display.h>
-#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi_dcs_backlight.h"
#define CONTROL_DISPLAY_BCTRL (1 << 5)
#define CONTROL_DISPLAY_DD (1 << 3)
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
new file mode 100644
index 000000000000..eb01947843bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DSI_DCS_BACKLIGHT_H__
+#define __INTEL_DSI_DCS_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DSI_DCS_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 4b8e48db1843..e5b178660408 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -24,30 +24,28 @@
*
*/
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/slab.h>
-#include <video/mipi_display.h>
+
#include <asm/intel-mid.h>
#include <asm/unaligned.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+
+#include <video/mipi_display.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
-#define PREPARE_CNT_MAX 0x3F
-#define EXIT_ZERO_CNT_MAX 0x3F
-#define CLK_ZERO_CNT_MAX 0xFF
-#define TRAIL_CNT_MAX 0x1F
-
-#define NS_KHZ_RATIO 1000000
-
/* base offsets for gpio pads */
#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
@@ -248,7 +246,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
pconf0 = VLV_GPIO_PCONF0(map->base_offset);
padval = VLV_GPIO_PAD_VAL(map->base_offset);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
if (!map->init) {
/* FIXME: remove constant below */
vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
@@ -257,7 +255,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
tmp = 0x4 | value;
vlv_iosf_sb_write(dev_priv, port, padval, tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
static void chv_exec_gpio(struct drm_i915_private *dev_priv,
@@ -303,12 +301,12 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
CHV_GPIO_GPIOTXSTATE(value));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
@@ -532,268 +530,42 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
-static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt, tclk_post_cnt;
-
- tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
-
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
- /*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
- */
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
-
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
-
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
-
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
-
- /* tclk post count in escape clocks */
- tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
- if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
- tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
- }
-
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
-
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
-
- /* clock lane dphy timings */
- intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
- CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
- CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_POST_OVERRIDE |
- CLK_POST(tclk_post_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
-
- /* data lanes dphy timings */
- intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
- HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
- HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
-}
-
-static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
-{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- u32 tlpx_ns, extra_byte_count, tlpx_ui;
- u32 ui_num, ui_den;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 tclk_prepare_clkzero, ths_prepare_hszero;
- u32 lp_to_hs_switch, hs_to_lp_switch;
- u32 mul;
-
- tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
-
- switch (intel_dsi->lane_count) {
- case 1:
- case 2:
- extra_byte_count = 2;
- break;
- case 3:
- extra_byte_count = 4;
- break;
- case 4:
- default:
- extra_byte_count = 3;
- break;
- }
-
- /* in Kbps */
- ui_num = NS_KHZ_RATIO;
- ui_den = intel_dsi_bitrate(intel_dsi);
-
- tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
- ths_prepare_hszero = mipi_config->ths_prepare_hszero;
-
- /*
- * B060
- * LP byte clock = TLPX/ (8UI)
- */
- intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
-
- /* DDR clock period = 2 * UI
- * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ)
- * UI(nsec) = 10^6 / bitrate
- * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate
- * DDR clock count = ns_value / DDR clock period
- *
- * For GEMINILAKE dphy_param_reg will be programmed in terms of
- * HS byte clock count for other platform in HS ddr clock count
- */
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
- /* prepare count */
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
-
- if (prepare_cnt > PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
- prepare_cnt = PREPARE_CNT_MAX;
- }
-
- /* exit zero count */
- exit_zero_cnt = DIV_ROUND_UP(
- (ths_prepare_hszero - ths_prepare_ns) * ui_den,
- ui_num * mul
- );
-
- /*
- * Exit zero is unified val ths_zero and ths_exit
- * minimum value for ths_exit = 110ns
- * min (exit_zero_cnt * 2) = 110/UI
- * exit_zero_cnt = 55/UI
- */
- if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
- exit_zero_cnt += 1;
-
- if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
- exit_zero_cnt = EXIT_ZERO_CNT_MAX;
- }
-
- /* clk zero count */
- clk_zero_cnt = DIV_ROUND_UP(
- (tclk_prepare_clkzero - ths_prepare_ns)
- * ui_den, ui_num * mul);
-
- if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
- clk_zero_cnt = CLK_ZERO_CNT_MAX;
- }
-
- /* trail count */
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
-
- if (trail_cnt > TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
- trail_cnt = TRAIL_CNT_MAX;
- }
-
- /* B080 */
- intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
- clk_zero_cnt << 8 | prepare_cnt;
-
- /*
- * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT *
- * mul + 10UI + Extra Byte Count
- *
- * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
- * Extra Byte Count is calculated according to number of lanes.
- * High Low Switch Count is the Max of LP to HS and
- * HS to LP switch count
- *
- */
- tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
-
- /* B044 */
- /* FIXME:
- * The comment above does not match with the code */
- lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
- exit_zero_cnt * mul + 10, 8);
-
- hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
-
- intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
- intel_dsi->hs_to_lp_count += extra_byte_count;
-
- /* B088 */
- /* LP -> HS for clock lanes
- * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
- * extra byte count
- * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
- * 2(in UI) + extra byte count
- * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
- * 8 + extra byte count
- */
- intel_dsi->clk_lp_to_hs_count =
- DIV_ROUND_UP(
- 4 * tlpx_ui + prepare_cnt * 2 +
- clk_zero_cnt * 2,
- 8);
-
- intel_dsi->clk_lp_to_hs_count += extra_byte_count;
-
- /* HS->LP for Clock Lanes
- * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
- * Extra byte count
- * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
- * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
- * Extra byte count
- */
- intel_dsi->clk_hs_to_lp_count =
- DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
- 8);
- intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+ DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
+ DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
+ DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ DRM_DEBUG_KMS("Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
+ DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ DRM_DEBUG_KMS("Dual link: NONE\n");
+ DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
+ DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
+ DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
+ DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ DRM_DEBUG_KMS("BTA %s\n",
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -883,46 +655,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
- if (INTEL_GEN(dev_priv) >= 11)
- icl_dphy_param_init(intel_dsi);
- else
- vlv_dphy_param_init(intel_dsi);
-
- DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
- DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
- DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
- DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- DRM_DEBUG_KMS("Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
- "burst" : "<unknown>");
- DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
- DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
- DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
- DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
- DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
- if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
- else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
- else
- DRM_DEBUG_KMS("Dual link: NONE\n");
- DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
- DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
- DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
- DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
- DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
- DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
- DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
- DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
- DRM_DEBUG_KMS("BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
-
/* Delays in the VBT are in units of 100 us, so convert them
* to ms here:
* Delay (100 us) * 100 / 1000 = Delay / 10 (ms) */
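[Editor's note] The conversion in the comment above is plain unit
arithmetic: the VBT stores delays as a count of 100 us ticks, so a stored
value of 250, say, means 250 * 100 us = 25000 us = 25 ms, i.e. the stored
value divided by 10.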
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index adef81c8cccb..22666d28f4aa 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -32,13 +32,19 @@
#include <drm/drm_crtc.h>
#include <drm/i915_drm.h>
-#include "dvo.h"
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dvo.h"
+#include "intel_dvo_dev.h"
+#include "intel_gmbus.h"
#include "intel_panel.h"
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index 3ed0fdf8efff..3ed0fdf8efff 100644
--- a/drivers/gpu/drm/i915/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index 16e0345b711f..94a6ae1e0292 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -20,12 +20,14 @@
* OF THIS SOFTWARE.
*/
-#ifndef _INTEL_DVO_H
-#define _INTEL_DVO_H
+#ifndef __INTEL_DVO_DEV_H__
+#define __INTEL_DVO_DEV_H__
#include <linux/i2c.h>
+
#include <drm/drm_crtc.h>
-#include "intel_drv.h"
+
+#include "i915_reg.h"
struct intel_dvo_device {
const char *name;
@@ -135,4 +137,4 @@ extern const struct intel_dvo_dev_ops tfp410_ops;
extern const struct intel_dvo_dev_ops ch7017_ops;
extern const struct intel_dvo_dev_ops ns2501_ops;
-#endif /* _INTEL_DVO_H */
+#endif /* __INTEL_DVO_DEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 5679f2fffb7c..d36cada2cc7d 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -344,6 +344,10 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
+ if (IS_GEN(dev_priv, 11))
+ /* Wa_1409120013:icl,ehl */
+ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272eda8d43..50272eda8d43 100644
--- a/drivers/gpu/drm/i915/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 89db71996148..1edd44ee32b2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -144,7 +144,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
@@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
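[Editor's note] As the hunks above show, intel_runtime_pm_get()/put() now
operate on a dedicated struct intel_runtime_pm embedded in the device
rather than on dev_priv itself, narrowing what the runtime-PM code can
touch. Usage keeps the usual cookie pattern:

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	/* ... access hardware ... */
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);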
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index de7c84250eb5..de7c84250eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 74c8b0528294..8545ad32bb50 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -28,6 +28,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_fbc.h"
+#include "intel_fifo_underrun.h"
/**
* DOC: fifo underrun handling
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
new file mode 100644
index 000000000000..e04f22ac1f49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FIFO_UNDERRUN_H__
+#define __INTEL_FIFO_UNDERRUN_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder,
+ bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index aa34e33b6087..44273c10cea5 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -53,16 +53,11 @@
* busyness. There is no direct way to detect idleness. Instead, a delayed
* idle timer work item should be started from the flush and flip functions and
* cancelled as soon as busyness is detected.
- *
- * Note that there's also an older frontbuffer activity tracking scheme which
- * just tracks general activity. This is done by the various mark_busy and
- * mark_idle functions. For display power management features using these
- * functions is deprecated and should be avoided.
*/
+#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index d5894666f658..5727320c8084 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,7 +24,7 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
struct drm_i915_private;
struct drm_i915_gem_object;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 422685d120e9..4f6a9bd5af47 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -26,13 +26,17 @@
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
+
#include <linux/export.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <drm/drm_hdcp.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_gmbus.h"
struct gmbus_pin {
const char *name;
@@ -84,11 +88,19 @@ static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
+static const struct gmbus_pin gmbus_pins_mcc[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
+};
+
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ return &gmbus_pins_mcc[pin];
+ else if (HAS_PCH_ICP(dev_priv))
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -107,7 +119,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_mcc);
+ else if (HAS_PCH_ICP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
@@ -134,7 +148,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_i2c_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
@@ -182,14 +196,15 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->dev_priv;
+ struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev_priv) && !IS_I845G(dev_priv))
- reserved = I915_READ_NOTRACE(bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ if (!IS_I830(i915) && !IS_I845G(i915))
+ reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -197,27 +212,37 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_CLOCK_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_DATA_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -225,16 +250,18 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
- GPIO_CLOCK_VAL_MASK;
+ GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
- POSTING_READ(bus->gpio_reg);
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | clock_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -244,8 +271,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
- POSTING_READ(bus->gpio_reg);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
}
static int
@@ -256,7 +283,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
if (IS_PINEVIEW(dev_priv))
pnv_gmbus_clock_gating(dev_priv, false);
@@ -577,8 +604,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
if (IS_GEN9_LP(dev_priv))
bxt_gmbus_clock_gating(dev_priv, false);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, false);
retry:
@@ -687,8 +713,7 @@ out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
if (IS_GEN9_LP(dev_priv))
bxt_gmbus_clock_gating(dev_priv, true);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, true);
return ret;
@@ -811,7 +836,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev_priv: i915 device private
*/
-int intel_setup_gmbus(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gmbus *bus;
@@ -872,7 +897,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
goto err;
}
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
return 0;
@@ -918,7 +943,14 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_unlock(&dev_priv->gmbus_mutex);
}
-void intel_teardown_gmbus(struct drm_i915_private *dev_priv)
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ return bus->force_bit;
+}
+
+void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
{
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
new file mode 100644
index 000000000000..d989085b8d22
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_H__
+#define __INTEL_GMBUS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct i2c_adapter;
+
+int intel_gmbus_setup(struct drm_i915_private *dev_priv);
+void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin);
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
+
+struct i2c_adapter *
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter);
+void intel_gmbus_reset(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_GMBUS_H__ */
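The intel_gmbus_is_forced_bit() accessor added above leans on the driver's to_intel_gmbus() helper, which recovers the enclosing intel_gmbus from its embedded i2c_adapter. A minimal standalone sketch of that container_of pattern, with the structs pared down to illustrative stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types. */
struct i2c_adapter { const char *name; };

struct intel_gmbus {
	struct i2c_adapter adapter;	/* embedded, not a pointer */
	bool force_bit;
};

/* container_of: step back from the embedded member to the parent. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct intel_gmbus *to_intel_gmbus(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter);
}

static bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return to_intel_gmbus(adapter)->force_bit;
}

int main(void)
{
	struct intel_gmbus bus = {
		.adapter = { "gmbus demo" },
		.force_bit = true,
	};

	printf("forced bit: %d\n", intel_gmbus_is_forced_bit(&bus.adapter));
	return 0;
}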
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 99b007169c49..bc3a94d491c4 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -16,6 +16,7 @@
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
+#include "intel_sideband.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -78,7 +79,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
}
/* Are the platform and sink HDCP 2.2 capable? */
-static bool intel_hdcp2_capable(struct intel_connector *connector)
+bool intel_hdcp2_capable(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
@@ -213,10 +214,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
*/
if (IS_GEN9_BC(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
ret);
@@ -492,9 +491,11 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Implements Part 2 of the HDCP authorization procedure */
static
-int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
- const struct intel_hdcp_shim *shim)
+int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ struct drm_device *dev = connector->base.dev;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
@@ -533,6 +534,11 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (ret)
goto err;
+ if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
+ DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ return -EPERM;
+ }
+
/*
* When V prime mismatches, the DP spec mandates re-reading
* V prime at least twice.
@@ -559,9 +565,12 @@ err:
}
/* Implements Part 1 of the HDCP authorization procedure */
-static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
- const struct intel_hdcp_shim *shim)
+static int intel_hdcp_auth(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
enum port port;
unsigned long r0_prime_gen_start;
@@ -627,6 +636,11 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret < 0)
return ret;
+ if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
+ DRM_ERROR("BKSV is revoked\n");
+ return -EPERM;
+ }
+
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -700,7 +714,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
*/
if (repeater_present)
- return intel_hdcp_auth_downstream(intel_dig_port, shim);
+ return intel_hdcp_auth_downstream(connector);
DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
return 0;
@@ -763,7 +777,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failure, the HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
+ ret = intel_hdcp_auth(connector);
if (!ret) {
hdcp->hdcp_encrypted = true;
return 0;
@@ -1162,6 +1176,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_ake_init ake_init;
struct hdcp2_ake_send_cert send_cert;
@@ -1196,6 +1211,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+ if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+ 1)) {
+ DRM_ERROR("Receiver ID is revoked\n");
+ return -EPERM;
+ }
+
/*
* msgs.no_stored_km also holds the message used for the case
* where km is already stored.
@@ -1306,7 +1327,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
/* Prepare RepeaterAuth_Stream_Manage msg */
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
- drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+ drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
/* K no of streams is fixed as 1. Stored as big-endian. */
msgs.stream_manage.k = cpu_to_be16(1);
@@ -1348,13 +1369,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
struct hdcp2_rep_send_ack rep_ack;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
+ u32 seq_num_v, device_cnt;
u8 *rx_info;
- u32 seq_num_v;
int ret;
ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
@@ -1371,7 +1393,8 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
}
/* Convert and store seq_num_v in a local variable as a DWORD */
- seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+ seq_num_v =
+ drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
@@ -1379,6 +1402,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return -EINVAL;
}
+ device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+ if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+ device_cnt)) {
+ DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ return -EPERM;
+ }
+
ret = hdcp2_verify_rep_topology_prepare_ack(connector,
&msgs.recvid_list,
&msgs.rep_ack);
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index a75f25f09d39..be8da85c866a 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -25,6 +25,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
+bool intel_hdcp2_capable(struct intel_connector *connector);
void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
void intel_hdcp_cleanup(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 34be2cfd0ec8..0ebec69bbbfc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -39,17 +39,24 @@
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_sdvo.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -122,6 +129,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return 0;
default:
MISSING_CASE(type);
return 0;
@@ -145,6 +154,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_SPD_HSW;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return VIDEO_DIP_ENABLE_DRM_GLK;
default:
MISSING_CASE(type);
return 0;
@@ -170,6 +181,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return GLK_TVIDEO_DIP_DRM_DATA(cpu_transcoder, i);
default:
MISSING_CASE(type);
return INVALID_MMIO_REG;
@@ -543,10 +556,16 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 mask;
+
+ mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ mask |= VIDEO_DIP_ENABLE_DRM_GLK;
- return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
- VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
- VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+ return val & mask;
}
static const u8 infoframe_type_to_idx[] = {
@@ -556,6 +575,7 @@ static const u8 infoframe_type_to_idx[] = {
HDMI_INFOFRAME_TYPE_AVI,
HDMI_INFOFRAME_TYPE_SPD,
HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_INFOFRAME_TYPE_DRM,
};
u32 intel_hdmi_infoframe_enable(unsigned int type)
@@ -778,6 +798,40 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
return true;
}
+static bool
+intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int ret;
+
+ if (!(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ return true;
+
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ if (!conn_state->hdr_output_metadata)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM);
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ return false;
+ }
+
+ ret = hdmi_drm_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
+}
+
static void g4x_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -846,19 +900,6 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
&crtc_state->infoframes.hdmi);
}
-static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
-{
- struct drm_connector *connector = conn_state->connector;
-
- /*
- * HDMI cloning is only supported on g4x which doesn't
- * support deep color or GCP infoframes anyway so no
- * need to worry about multiple HDMI sinks here.
- */
-
- return connector->display_info.bpc > 8;
-}
-
/*
* Determine if default_phase=1 can be indicated in the GCP infoframe.
*
@@ -963,8 +1004,8 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
- /* Indicate color depth whenever the sink supports deep color */
- if (hdmi_sink_is_deep_color(conn_state))
+ /* Enable GCP color indication when in a deep color mode */
+ if (crtc_state->pipe_bpp > 24)
crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
@@ -1153,7 +1194,8 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
- VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_DRM_GLK);
if (!enable) {
I915_WRITE(reg, val);
@@ -1176,6 +1218,9 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_VENDOR,
&crtc_state->infoframes.hdmi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &crtc_state->infoframes.drm);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1762,7 +1807,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
- if (tmp & SDVO_AUDIO_ENABLE)
+ if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
if (!HAS_PCH_SPLIT(dev_priv) &&
@@ -1821,7 +1866,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1843,7 +1888,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
/*
* HW workaround, need to write this twice for issue
@@ -1895,7 +1940,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
@@ -1955,7 +2000,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
- temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
+ temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -2162,7 +2207,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
return false;
- if (crtc_state->pipe_bpp <= 8*3)
+ if (crtc_state->pipe_bpp < bpc * 3)
return false;
if (!crtc_state->has_hdmi_sink)
@@ -2382,6 +2427,11 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
+ if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad DRM infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2620,12 +2670,12 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
@@ -2654,6 +2704,36 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_phy_release_cl2_override(encoder);
}
+static struct i2c_adapter *
+intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+ return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+}
+
+static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
+{
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+ int ret;
+
+ ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
+ if (ret)
+ DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+}
+
+static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
+{
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+
+ sysfs_remove_link(connector_kobj, i2c_kobj->name);
+}
+
static int
intel_hdmi_connector_register(struct drm_connector *connector)
{
@@ -2665,6 +2745,8 @@ intel_hdmi_connector_register(struct drm_connector *connector)
i915_debugfs_connector_add(connector);
+ intel_hdmi_create_i2c_symlink(connector);
+
return ret;
}
@@ -2676,6 +2758,13 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
intel_connector_destroy(connector);
}
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+{
+ intel_hdmi_remove_i2c_symlink(connector);
+
+ intel_connector_unregister(connector);
+}
+
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
@@ -2683,7 +2772,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_hdmi_connector_register,
- .early_unregister = intel_connector_unregister,
+ .early_unregister = intel_hdmi_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -2721,6 +2810,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
if (!HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
@@ -2866,6 +2959,28 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
+static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_A:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_9_TC1_ICP;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -2902,7 +3017,9 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return info->alternate_ddc_pin;
}
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
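mcc_port_to_ddc_pin() above is a plain port-to-pin switch consulted before the ICP and CNP tables. The same shape as a standalone sketch; the GMBUS_PIN_* values below are illustrative stand-ins for the constants in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C };

/* Illustrative pin numbers; the real values come from i915_reg.h. */
#define GMBUS_PIN_1_BXT		1
#define GMBUS_PIN_2_BXT		2
#define GMBUS_PIN_9_TC1_ICP	9

static uint8_t mcc_port_to_ddc_pin(enum port port)
{
	switch (port) {
	case PORT_A:
		return GMBUS_PIN_1_BXT;
	case PORT_B:
		return GMBUS_PIN_2_BXT;
	case PORT_C:
		return GMBUS_PIN_9_TC1_ICP;
	default:
		/* MISSING_CASE() in the driver; fall back to pin 1. */
		return GMBUS_PIN_1_BXT;
	}
}

int main(void)
{
	for (enum port p = PORT_A; p <= PORT_C; p++)
		printf("port %d -> ddc pin %u\n", p, mcc_port_to_ddc_pin(p));
	return 0;
}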
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..106c2e0bc3c9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index b8937c788f03..ea3de4acc850 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_hotplug.h"
/**
* DOC: Hotplug
@@ -229,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_wakeref_t wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
@@ -262,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
new file mode 100644
index 000000000000..805f897dbb7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HOTPLUG_H__
+#define __INTEL_HOTPLUG_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_encoder;
+
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+
+#endif /* __INTEL_HOTPLUG_H__ */
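The hotplug hunks route the runtime-PM calls through the dedicated runtime_pm struct; the intel_wakeref_t cookie pairs each get with its put so leaked references can be traced. A loose userspace analogy of that cookie pattern, not the kernel API itself:

#include <assert.h>
#include <stdio.h>

struct runtime_pm { int usecount; };
typedef unsigned long wakeref_t;

/* Take a reference; the returned cookie must be handed back to put(). */
static wakeref_t runtime_pm_get(struct runtime_pm *rpm)
{
	rpm->usecount++;
	return (wakeref_t)rpm;	/* a real tracker returns a unique cookie */
}

static void runtime_pm_put(struct runtime_pm *rpm, wakeref_t wakeref)
{
	assert(wakeref == (wakeref_t)rpm);	/* catch mismatched pairs */
	assert(rpm->usecount > 0);
	rpm->usecount--;
}

int main(void)
{
	struct runtime_pm rpm = { 0 };
	wakeref_t wakeref = runtime_pm_get(&rpm);

	/* ... device is guaranteed awake here ... */
	printf("usecount while held: %d\n", rpm.usecount);

	runtime_pm_put(&rpm, wakeref);
	printf("usecount after put: %d\n", rpm.usecount);
	return 0;
}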
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index f8239bca3820..b19800b58442 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -61,16 +61,18 @@
*/
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/pci.h>
-#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
-#include "i915_drv.h"
-#include <linux/delay.h>
#include <drm/intel_lpe_audio.h>
+#include "i915_drv.h"
+#include "intel_lpe_audio.h"
+
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
static struct platform_device *
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
new file mode 100644
index 000000000000..f848c5038714
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LPE_AUDIO_H__
+#define __INTEL_LPE_AUDIO_H__
+
+#include <linux/types.h>
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output);
+
+#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7028d0cf3bb1..7028d0cf3bb1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
diff --git a/drivers/gpu/drm/i915/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..37cfddf8a9c5 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 51d1d59c1619..efefed62a7f8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -40,8 +40,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index bc9c8b84ba2f..bc9c8b84ba2f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 8fa1159d097f..824881271351 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -32,10 +32,11 @@
#include <drm/i915_drm.h>
+#include "display/intel_panel.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_opregion.h"
-#include "intel_panel.h"
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 4aa68ffbd30e..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index eb317759b5d3..21339b7f6a3e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -25,13 +25,17 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-#include <drm/i915_drm.h>
+
#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+
+#include "gem/i915_gem_pm.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
+#include "intel_overlay.h"
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -235,10 +239,9 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS0];
+ struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
- return i915_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_create(engine->kernel_context);
}
/* overlay needs to be disable in OCMD reg */
@@ -762,8 +765,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE);
+ i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
@@ -1302,15 +1307,20 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
+ struct drm_i915_private *i915 = overlay->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (obj == NULL)
- obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_unlock;
+ }
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1331,10 +1341,13 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1360,18 +1373,10 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
INIT_ACTIVE_REQUEST(&overlay->last_flip);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
goto out_free;
- ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
- if (ret)
- goto out_reg_bo;
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
@@ -1380,10 +1385,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
DRM_INFO("Initialized overlay support.\n");
return;
-out_reg_bo:
- i915_gem_object_put(overlay->reg_bo);
out_free:
- mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
new file mode 100644
index 000000000000..a167c28acd27
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_OVERLAY_H__
+#define __INTEL_OVERLAY_H__
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_error_state_buf;
+struct drm_i915_private;
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_overlay_error_state *error);
+
+#endif /* __INTEL_OVERLAY_H__ */
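get_registers() now takes struct_mutex itself, tries stolen memory first, falls back to an internal object, and unwinds through labelled error paths. A standalone sketch of that try/fallback/goto-unwind control flow, with the allocators faked via malloc:

#include <stdio.h>
#include <stdlib.h>

/* Fake allocators: the preferred one may fail, the fallback may too. */
static void *alloc_stolen(size_t size)
{
	(void)size;
	return NULL;	/* pretend stolen memory is exhausted */
}

static void *alloc_internal(size_t size)
{
	return malloc(size);
}

static int get_registers(void **out, size_t size)
{
	void *obj;
	int err;

	/* lock(); -- the real code holds struct_mutex across this */

	obj = alloc_stolen(size);
	if (!obj)
		obj = alloc_internal(size);	/* fall back */
	if (!obj) {
		err = -1;
		goto err_unlock;
	}

	*out = obj;
	/* unlock(); */
	return 0;

err_unlock:
	/* unlock(); -- every exit path releases the lock exactly once */
	return err;
}

int main(void)
{
	void *regs = NULL;

	if (get_registers(&regs, 4096) == 0)
		printf("allocated via fallback at %p\n", regs);
	free(regs);
	return 0;
}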
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 4ab4ce6569e7..39d742094065 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,7 +35,9 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
+#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
@@ -1286,7 +1288,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 hw_level;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
diff --git a/drivers/gpu/drm/i915/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index cedeea443336..cedeea443336 100644
--- a/drivers/gpu/drm/i915/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index e7c7be4911c1..1e2c4307d05a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -29,6 +29,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_pipe_crc.h"
@@ -313,17 +314,8 @@ retry:
if (IS_HASWELL(dev_priv) &&
pipe_config->base.active && crtc->pipe == PIPE_A &&
- pipe_config->cpu_transcoder == TRANSCODER_EDP) {
- bool old_need_power_well = pipe_config->pch_pfit.enabled ||
- pipe_config->pch_pfit.force_thru;
- bool new_need_power_well = pipe_config->pch_pfit.enabled ||
- enable;
-
- pipe_config->pch_pfit.force_thru = enable;
-
- if (old_need_power_well != new_need_power_well)
- pipe_config->base.connectors_changed = true;
- }
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->base.mode_changed = true;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index 81eaf1854788..db258a756fc6 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -9,9 +9,11 @@
#include <linux/types.h>
struct drm_crtc;
+struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -20,6 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 963663ba0edf..69d908e6a050 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -23,8 +23,9 @@
#include <drm/drm_atomic_helper.h>
+#include "display/intel_dp.h"
+
#include "i915_drv.h"
-#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -229,16 +230,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
- u8 dprx = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx) != 1)
- return false;
- return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
u8 alpm_caps = 0;
@@ -352,7 +343,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_vsc_psr psr_vsc;
+ struct dp_sdp psr_vsc;
if (dev_priv->psr.psr2_enabled) {
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
@@ -872,16 +863,23 @@ void intel_psr_disable(struct intel_dp *intel_dp,
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ if (INTEL_GEN(dev_priv) >= 9)
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ else
+ /*
+ * A write to CURSURFLIVE do not cause HW tracking to exit PSR
+ * on older gens so doing the manual exit instead.
+ */
+ intel_psr_exit(dev_priv);
}
/**
@@ -912,6 +910,15 @@ void intel_psr_update(struct intel_dp *intel_dp,
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(dev_priv);
+ else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
+ /*
+ * Activate PSR again after a force exit when enabling
+ * CRC in older gens
+ */
+ if (!dev_priv->psr.active &&
+ !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_work(&dev_priv->psr.work);
+ }
goto unlock;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..dc818826f36d 100644
--- a/drivers/gpu/drm/i915/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index ec2b0fc92b8b..0b749c28541f 100644
--- a/drivers/gpu/drm/i915/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include "intel_drv.h"
+#include "intel_quirks.h"
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
new file mode 100644
index 000000000000..b0fcff142a56
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_QUIRKS_H__
+#define __INTEL_QUIRKS_H__
+
+struct drm_i915_private;
+
+void intel_init_quirks(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 9ecfba0a54a1..ceda03e5a3d4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -37,9 +37,13 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
@@ -518,6 +522,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_LEN 256
char buffer[BUF_LEN];
+ buffer[0] = '\0';
/*
* The documentation states that all commands will be
@@ -581,7 +586,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
return true;
log_fail:
- DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
+ DRM_DEBUG_KMS("%s: R: ... failed %s\n",
+ SDVO_NAME(intel_sdvo), buffer);
return false;
}
@@ -976,6 +982,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
if_index, length, hbuf_size);
+ if (hbuf_size < length)
+ return false;
+
for (i = 0; i < hbuf_size; i += 8) {
memset(tmp, 0, 8);
if (i < length)
@@ -1008,6 +1017,11 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (av_split < if_index)
return 0;
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_HBUF_TXRATE,
&tx_rate, 1))
@@ -1016,11 +1030,6 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return -ENXIO;
-
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
&hbuf_size, 1))
return -ENXIO;
@@ -1099,7 +1108,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
- sdvo_data, sizeof(sdvo_data));
+ sdvo_data, len);
}
static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
@@ -1125,7 +1134,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
if (ret) {
DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
return;
@@ -2388,9 +2397,10 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
};
static int intel_sdvo_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_conn_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, conn);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, conn);
struct intel_sdvo_connector_state *old_state =
@@ -2402,13 +2412,13 @@ static int intel_sdvo_atomic_check(struct drm_connector *conn,
(memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) ||
memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(new_conn_state->state,
+ drm_atomic_get_new_crtc_state(state,
new_conn_state->crtc);
crtc_state->connectors_changed = true;
}
- return intel_digital_connector_atomic_check(conn, new_conn_state);
+ return intel_digital_connector_atomic_check(conn, state);
}
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
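The SDVO infoframe fixes above bound the write by the hardware hbuf size and transfer only the len valid bytes rather than the whole sdvo_data array. Roughly, as a standalone sketch of the 8-byte-chunked, zero-padded copy:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the SDVO hbuf register write: 8 bytes at a time. */
static void write_hbuf_chunk(const uint8_t tmp[8])
{
	printf("chunk:");
	for (int i = 0; i < 8; i++)
		printf(" %02x", tmp[i]);
	printf("\n");
}

static bool write_infoframe(const uint8_t *data, size_t length,
			    size_t hbuf_size)
{
	uint8_t tmp[8];

	if (hbuf_size < length)		/* the added bounds check */
		return false;

	/* Pad the hbuf out with zeroes, copying payload where it exists. */
	for (size_t i = 0; i < hbuf_size; i += 8) {
		memset(tmp, 0, 8);
		if (i < length)
			memcpy(tmp, data + i,
			       length - i < 8 ? length - i : 8);
		write_hbuf_chunk(tmp);
	}
	return true;
}

int main(void)
{
	uint8_t frame[13] = { 0x82, 0x02, 0x0d };	/* AVI-style header */

	return write_infoframe(frame, sizeof(frame), 16) ? 0 : 1;
}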
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..c9e05bcdd141 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index e9ba3b047f93..13b9a8e257bb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -24,6 +24,12 @@
* Eric Anholt <eric@anholt.net>
*/
+#ifndef __INTEL_SDVO_REGS_H__
+#define __INTEL_SDVO_REGS_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
/*
* SDVO command definitions and structures.
*/
@@ -731,3 +737,5 @@ struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
} __packed;
+
+#endif /* __INTEL_SDVO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 2913e89280d7..004b52027ae8 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -256,6 +256,16 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
u32 stride, max_stride;
+ /*
+ * We ignore stride for all invisible planes that
+ * can be remapped. Otherwise we could end up
+ * with a false positive when the remapping didn't
+ * kick in due to the plane being invisible.
+ */
+ if (intel_plane_can_remap(plane_state) &&
+ !plane_state->base.visible)
+ return 0;
+
/* FIXME other color planes? */
stride = plane_state->color_plane[0].stride;
max_stride = plane->max_stride(plane, fb->format->format,
@@ -325,7 +335,8 @@ skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- int cpp = drm_format_plane_cpp(pixel_format, 0);
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
/*
* "The stride in bytes must not exceed the
@@ -1417,6 +1428,10 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1428,10 +1443,6 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
if (INTEL_GEN(dev_priv) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
@@ -1475,6 +1486,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1482,10 +1497,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
return 0;
@@ -1639,6 +1650,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1654,10 +1669,6 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
/* HW has only 8 bits of alpha precision; disable the plane if invisible */
if (!(plane_state->base.alpha >> 8))
plane_state->base.visible = false;
@@ -2146,8 +2157,6 @@ static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = g4x_sprite_format_mod_supported,
@@ -2157,8 +2166,6 @@ static const struct drm_plane_funcs snb_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = snb_sprite_format_mod_supported,
@@ -2168,8 +2175,6 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = vlv_sprite_format_mod_supported,
@@ -2179,8 +2184,6 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 84be8686be16..500f6bffb139 100644
--- a/drivers/gpu/drm/i915/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -43,13 +43,17 @@ static inline bool icl_is_nv12_y_plane(enum plane_id id)
return false;
}
+static inline u8 icl_hdr_plane_mask(void)
+{
+ return BIT(PLANE_PRIMARY) |
+ BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
+}
+
static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
- if (INTEL_GEN(dev_priv) < 11)
- return false;
-
- return plane_id < PLANE_SPRITE2;
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
}
#endif /* __INTEL_SPRITE_H__ */
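Replacing the plane_id < PLANE_SPRITE2 comparison with an explicit bitmask keeps icl_is_hdr_plane() correct even if the plane enum is ever reordered. A standalone sketch of the mask test with an illustrative plane enum:

#include <stdbool.h>
#include <stdio.h>

enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_SPRITE2,
	PLANE_SPRITE3,
	PLANE_CURSOR,
};

#define BIT(n) (1u << (n))

/* Only the first three universal planes have the HDR pipeline on ICL. */
static unsigned int icl_hdr_plane_mask(void)
{
	return BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}

static bool icl_is_hdr_plane(int gen, enum plane_id plane_id)
{
	return gen >= 11 && (icl_hdr_plane_mask() & BIT(plane_id));
}

int main(void)
{
	for (enum plane_id id = PLANE_PRIMARY; id <= PLANE_CURSOR; id++)
		printf("plane %d hdr-capable: %d\n", id,
		       icl_is_hdr_plane(11, id));
	return 0;
}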
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbba33f4202..0a95df6c6a57 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -38,6 +38,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_hotplug.h"
#include "intel_tv.h"
enum tv_margin {
@@ -1820,16 +1821,18 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
};
static int intel_tv_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *new_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector_state *old_state;
+ new_state = drm_atomic_get_new_connector_state(state, connector);
if (!new_state->crtc)
return 0;
- old_state = drm_atomic_get_old_connector_state(new_state->state, connector);
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+ old_state = drm_atomic_get_old_connector_state(state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (old_state->tv.mode != new_state->tv.mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h
index 44518575ec5c..44518575ec5c 100644
--- a/drivers/gpu/drm/i915/intel_tv.h
+++ b/drivers/gpu/drm/i915/display/intel_tv.h
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index fdbbb9a53804..2f4894e9a03d 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -75,65 +75,51 @@ struct bdb_header {
u16 bdb_size;
} __packed;
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
- u8 type; /* 0 == desktop, 1 == mobile */
- u8 relstage;
- u8 chipset;
- u8 lvds_present:1;
- u8 tv_present:1;
- u8 rsvd2:6; /* finish byte */
- u8 rsvd3[4];
- u8 signon[155];
- u8 copyright[61];
- u16 code_segment;
- u8 dos_boot_mode;
- u8 bandwidth_percent;
- u8 rsvd4; /* popup memory size */
- u8 resize_pci_bios;
- u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
*/
-#define BDB_GENERAL_FEATURES 1
-#define BDB_GENERAL_DEFINITIONS 2
-#define BDB_OLD_TOGGLE_LIST 3
-#define BDB_MODE_SUPPORT_LIST 4
-#define BDB_GENERIC_MODE_TABLE 5
-#define BDB_EXT_MMIO_REGS 6
-#define BDB_SWF_IO 7
-#define BDB_SWF_MMIO 8
-#define BDB_PSR 9
-#define BDB_MODE_REMOVAL_TABLE 10
-#define BDB_CHILD_DEVICE_TABLE 11
-#define BDB_DRIVER_FEATURES 12
-#define BDB_DRIVER_PERSISTENCE 13
-#define BDB_EXT_TABLE_PTRS 14
-#define BDB_DOT_CLOCK_OVERRIDE 15
-#define BDB_DISPLAY_SELECT 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION 18
-#define BDB_DISPLAY_REMOVE 19
-#define BDB_OEM_CUSTOM 20
-#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS 22
-#define BDB_SDVO_PANEL_DTDS 23
-#define BDB_SDVO_LVDS_PNP_IDS 24
-#define BDB_SDVO_LVDS_POWER_SEQ 25
-#define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
-#define BDB_LVDS_OPTIONS 40
-#define BDB_LVDS_LFP_DATA_PTRS 41
-#define BDB_LVDS_LFP_DATA 42
-#define BDB_LVDS_BACKLIGHT 43
-#define BDB_LVDS_POWER 44
-#define BDB_MIPI_CONFIG 52
-#define BDB_MIPI_SEQUENCE 53
-#define BDB_SKIP 254 /* VBIOS private block, ignore */
+enum bdb_block_id {
+ BDB_GENERAL_FEATURES = 1,
+ BDB_GENERAL_DEFINITIONS = 2,
+ BDB_OLD_TOGGLE_LIST = 3,
+ BDB_MODE_SUPPORT_LIST = 4,
+ BDB_GENERIC_MODE_TABLE = 5,
+ BDB_EXT_MMIO_REGS = 6,
+ BDB_SWF_IO = 7,
+ BDB_SWF_MMIO = 8,
+ BDB_PSR = 9,
+ BDB_MODE_REMOVAL_TABLE = 10,
+ BDB_CHILD_DEVICE_TABLE = 11,
+ BDB_DRIVER_FEATURES = 12,
+ BDB_DRIVER_PERSISTENCE = 13,
+ BDB_EXT_TABLE_PTRS = 14,
+ BDB_DOT_CLOCK_OVERRIDE = 15,
+ BDB_DISPLAY_SELECT = 16,
+ BDB_DRIVER_ROTATION = 18,
+ BDB_DISPLAY_REMOVE = 19,
+ BDB_OEM_CUSTOM = 20,
+ BDB_EFP_LIST = 21, /* workarounds for VGA hsync/vsync */
+ BDB_SDVO_LVDS_OPTIONS = 22,
+ BDB_SDVO_PANEL_DTDS = 23,
+ BDB_SDVO_LVDS_PNP_IDS = 24,
+ BDB_SDVO_LVDS_POWER_SEQ = 25,
+ BDB_TV_OPTIONS = 26,
+ BDB_EDP = 27,
+ BDB_LVDS_OPTIONS = 40,
+ BDB_LVDS_LFP_DATA_PTRS = 41,
+ BDB_LVDS_LFP_DATA = 42,
+ BDB_LVDS_BACKLIGHT = 43,
+ BDB_LVDS_POWER = 44,
+ BDB_MIPI_CONFIG = 52,
+ BDB_MIPI_SEQUENCE = 53,
+ BDB_SKIP = 254, /* VBIOS private block, ignore */
+};
+
+/*
+ * Block 1 - General Bit Definitions
+ */
struct bdb_general_features {
/* bits 1 */
@@ -176,6 +162,10 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
} __packed;
+/*
+ * Block 2 - General Bytes Definition
+ */
+
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
@@ -324,6 +314,9 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
+ MCC_DDC_BUS_DDI_A = 0x1,
+ MCC_DDC_BUS_DDI_B,
+ MCC_DDC_BUS_DDI_C = 0x4,
};
#define DP_AUX_A 0x40
@@ -402,7 +395,8 @@ struct child_device_config {
u8 lspcon:1; /* 192 */
u8 iboost:1; /* 196 */
u8 hpd_invert:1; /* 196 */
- u8 flag_reserved:3;
+ u8 use_vbt_vswing:1; /* 218 */
+ u8 flag_reserved:2;
u8 hdmi_support:1; /* 158 */
u8 dp_support:1; /* 158 */
u8 tmds_support:1; /* 158 */
@@ -466,186 +460,36 @@ struct bdb_general_definitions {
u8 devices[0];
} __packed;
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
-struct bdb_lvds_options {
- u8 panel_type;
- u8 rsvd1;
- /* LVDS capabilities, stored in a dword */
- u8 pfit_mode:2;
- u8 pfit_text_mode_enhanced:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_ratio_auto:1;
- u8 pixel_dither:1;
- u8 lvds_edid:1;
- u8 rsvd2:1;
- u8 rsvd4;
- /* LVDS Panel channel bits stored here */
- u32 lvds_panel_channel_bits;
- /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
- u16 ssc_bits;
- u16 ssc_freq;
- u16 ssc_ddt;
- /* Panel color depth defined here */
- u16 panel_color_depth;
- /* LVDS panel type bits stored here */
- u32 dps_panel_type_bits;
- /* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
- struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
- u16 x_res;
- u16 y_res;
- u32 lvds_reg;
- u32 lvds_reg_val;
- u32 pp_on_reg;
- u32 pp_on_reg_val;
- u32 pp_off_reg;
- u32 pp_off_reg_val;
- u32 pp_cycle_reg;
- u32 pp_cycle_reg_val;
- u32 pfit_reg;
- u32 pfit_reg_val;
- u16 terminator;
-} __packed;
-
-struct lvds_dvo_timing {
- u16 clock; /**< In 10khz */
- u8 hactive_lo;
- u8 hblank_lo;
- u8 hblank_hi:4;
- u8 hactive_hi:4;
- u8 vactive_lo;
- u8 vblank_lo;
- u8 vblank_hi:4;
- u8 vactive_hi:4;
- u8 hsync_off_lo;
- u8 hsync_pulse_width_lo;
- u8 vsync_pulse_width_lo:4;
- u8 vsync_off_lo:4;
- u8 vsync_pulse_width_hi:2;
- u8 vsync_off_hi:2;
- u8 hsync_pulse_width_hi:2;
- u8 hsync_off_hi:2;
- u8 himage_lo;
- u8 vimage_lo;
- u8 vimage_hi:4;
- u8 himage_hi:4;
- u8 h_border;
- u8 v_border;
- u8 rsvd1:3;
- u8 digital:2;
- u8 vsync_positive:1;
- u8 hsync_positive:1;
- u8 non_interlaced:1;
-} __packed;
-
-struct lvds_pnp_id {
- u16 mfg_name;
- u16 product_code;
- u32 serial;
- u8 mfg_week;
- u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
- struct lvds_fp_timing fp_timing;
- struct lvds_dvo_timing dvo_timing;
- struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
- struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE 0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct bdb_lfp_backlight_data_entry {
- u8 type:2;
- u8 active_low_pwm:1;
- u8 obsolete1:5;
- u16 pwm_freq_hz;
- u8 min_brightness;
- u8 obsolete2;
- u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_control_method {
- u8 type:4;
- u8 controller:4;
-} __packed;
-
-struct bdb_lfp_backlight_data {
- u8 entry_size;
- struct bdb_lfp_backlight_data_entry data[16];
- u8 level[16];
- struct bdb_lfp_backlight_control_method backlight_control[16];
-} __packed;
+/*
+ * Block 9 - SRD Feature Block
+ */
-struct aimdb_header {
- char signature[16];
- char oem_device[20];
- u16 aimdb_version;
- u16 aimdb_header_size;
- u16 aimdb_size;
-} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
-struct aimdb_block {
- u8 aimdb_id;
- u16 aimdb_size;
-} __packed;
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
-struct vch_panel_data {
- u16 fp_timing_offset;
- u8 fp_timing_size;
- u16 dvo_timing_offset;
- u8 dvo_timing_size;
- u16 text_fitting_offset;
- u8 text_fitting_size;
- u16 graphics_fitting_offset;
- u8 graphics_fitting_size;
-} __packed;
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
-struct vch_bdb_22 {
- struct aimdb_block aimdb_block;
- struct vch_panel_data panels[16];
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
-struct bdb_sdvo_lvds_options {
- u8 panel_backlight;
- u8 h40_set_panel_type;
- u8 panel_type;
- u8 ssc_clk_freq;
- u16 als_low_trip;
- u16 als_high_trip;
- u8 sclalarcoeff_tab_row_num;
- u8 sclalarcoeff_tab_row_size;
- u8 coefficient[8];
- u8 panel_misc_bits_1;
- u8 panel_misc_bits_2;
- u8 panel_misc_bits_3;
- u8 panel_misc_bits_4;
+struct bdb_psr {
+ struct psr_table psr_table[16];
} __packed;
+/*
+ * Block 12 - Driver Features Data Block
+ */
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
@@ -706,6 +550,69 @@ struct bdb_driver_features {
u16 pc_feature_valid:1;
} __packed;
+/*
+ * Block 22 - SDVO LVDS General Options
+ */
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+/*
+ * Block 23 - SDVO LVDS Panel DTDs
+ */
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10 kHz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 non_interlaced:1;
+} __packed;
+
+struct bdb_sdvo_panel_dtds {
+ struct lvds_dvo_timing dtds[4];
+} __packed;
+
+/*
+ * Block 27 - eDP VBT Block
+ */
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
@@ -758,154 +665,133 @@ struct bdb_edp {
struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
-struct psr_table {
- /* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
- u8 feature_bits_rsvd:6;
+/*
+ * Block 40 - LFP Data Block
+ */
- /* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
- u8 wait_times_rsvd:1;
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
- /* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 panel_type2; /* 212 */
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
- /* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
+ u16 lcdvcc_s0_enable; /* 200 */
+ u32 rotation; /* 228 */
} __packed;
-struct bdb_psr {
- struct psr_table psr_table[16];
+/*
+ * Block 41 - LFP Data Table Pointers
+ */
+
+/* The LFP pointer table contains entries pointing at the struct below */
+struct lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_lfp_data_ptr structs */
+ struct lvds_lfp_data_ptr ptr[16];
} __packed;
/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
+ * Block 42 - LFP Data Tables
*/
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
-#define GR18_HK_NONE (0x0<<3)
-#define GR18_HK_LFP_STRETCH (0x1<<3)
-#define GR18_HK_TOGGLE_DISP (0x2<<3)
-#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
-#define GR18_HK_POPUP_DISABLED (0x6<<3)
-#define GR18_HK_POPUP_ENABLED (0x7<<3)
-#define GR18_HK_PFIT (0x8<<3)
-#define GR18_HK_APM_CHANGE (0xa<<3)
-#define GR18_HK_MULTIPLE (0xc<<3)
-#define GR18_USER_INT_EN (1<<2)
-#define GR18_A0000_FLUSH_EN (1<<1)
-#define GR18_SMM_EN (1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT 16
-#define SWF00_XRES_SHIFT 0
-#define SWF00_RES_MASK 0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT 8
-#define SWF01_TV1_FORMAT_SHIFT 0
-#define SWF01_TV_FORMAT_MASK 0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
-#define SWF10_GTT_OVERRIDE_EN (1<<28)
-#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define SWF10_OLD_TOGGLE 0x0
-#define SWF10_TOGGLE_LIST_1 0x1
-#define SWF10_TOGGLE_LIST_2 0x2
-#define SWF10_TOGGLE_LIST_3 0x3
-#define SWF10_TOGGLE_LIST_4 0x4
-#define SWF10_PANNING_EN (1<<23)
-#define SWF10_DRIVER_LOADED (1<<22)
-#define SWF10_EXTENDED_DESKTOP (1<<21)
-#define SWF10_EXCLUSIVE_MODE (1<<20)
-#define SWF10_OVERLAY_EN (1<<19)
-#define SWF10_PLANEB_HOLDOFF (1<<18)
-#define SWF10_PLANEA_HOLDOFF (1<<17)
-#define SWF10_VGA_HOLDOFF (1<<16)
-#define SWF10_ACTIVE_DISP_MASK 0xffff
-#define SWF10_PIPEB_LFP2 (1<<15)
-#define SWF10_PIPEB_EFP2 (1<<14)
-#define SWF10_PIPEB_TV2 (1<<13)
-#define SWF10_PIPEB_CRT2 (1<<12)
-#define SWF10_PIPEB_LFP (1<<11)
-#define SWF10_PIPEB_EFP (1<<10)
-#define SWF10_PIPEB_TV (1<<9)
-#define SWF10_PIPEB_CRT (1<<8)
-#define SWF10_PIPEA_LFP2 (1<<7)
-#define SWF10_PIPEA_EFP2 (1<<6)
-#define SWF10_PIPEA_TV2 (1<<5)
-#define SWF10_PIPEA_CRT2 (1<<4)
-#define SWF10_PIPEA_LFP (1<<3)
-#define SWF10_PIPEA_EFP (1<<2)
-#define SWF10_PIPEA_TV (1<<1)
-#define SWF10_PIPEA_CRT (1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT 16
-#define SWF11_SV_TEST_EN (1<<15)
-#define SWF11_IS_AGP (1<<14)
-#define SWF11_DISPLAY_HOLDOFF (1<<13)
-#define SWF11_DPMS_REDUCED (1<<12)
-#define SWF11_IS_VBE_MODE (1<<11)
-#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK 0x07
-#define SWF11_DPMS_OFF (1<<2)
-#define SWF11_DPMS_SUSPEND (1<<1)
-#define SWF11_DPMS_STANDBY (1<<0)
-#define SWF11_DPMS_ON 0
-
-#define SWF14_GFX_PFIT_EN (1<<31)
-#define SWF14_TEXT_PFIT_EN (1<<30)
-#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN (1<<28)
-#define SWF14_DISPLAY_HOLDOFF (1<<27)
-#define SWF14_DISP_DETECT_EN (1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS (1<<24)
-#define SWF14_OS_TYPE_WIN9X (1<<23)
-#define SWF14_OS_TYPE_WINNT (1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK 0x00070000
-#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
-#define SWF14_PM_ACPI (0x3 << 16)
-#define SWF14_PM_APM_12 (0x2 << 16)
-#define SWF14_PM_APM_11 (0x1 << 16)
-#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
- /* if GR18 indicates a display switch */
-#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define SWF14_DS_PIPEB_TV2_EN (1<<13)
-#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define SWF14_DS_PIPEB_LFP_EN (1<<11)
-#define SWF14_DS_PIPEB_EFP_EN (1<<10)
-#define SWF14_DS_PIPEB_TV_EN (1<<9)
-#define SWF14_DS_PIPEB_CRT_EN (1<<8)
-#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define SWF14_DS_PIPEA_TV2_EN (1<<5)
-#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define SWF14_DS_PIPEA_LFP_EN (1<<3)
-#define SWF14_DS_PIPEA_EFP_EN (1<<2)
-#define SWF14_DS_PIPEA_TV_EN (1<<1)
-#define SWF14_DS_PIPEA_CRT_EN (1<<0)
- /* if GR18 indicates a panel fitting request */
-#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
- /* if GR18 indicates an APM change request */
-#define SWF14_APM_HIBERNATE 0x4
-#define SWF14_APM_SUSPEND 0x3
-#define SWF14_APM_STANDBY 0x1
-#define SWF14_APM_RESTORE 0x0
-
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data block
- * block below
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+struct lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct lvds_lfp_data_entry data[16];
+} __packed;
+
+/*
+ * Block 43 - LFP Backlight Control Data Block
*/
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct lfp_backlight_data_entry data[16];
+ u8 level[16];
+ struct lfp_backlight_control_method backlight_control[16];
+} __packed;
+
+/*
+ * Block 52 - MIPI Configuration Block
+ */
+
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
@@ -913,24 +799,13 @@ struct bdb_mipi_config {
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
} __packed;
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
+/*
+ * Block 53 - MIPI Sequence Block
*/
+
struct bdb_mipi_sequence {
u8 version;
- u8 data[0];
+ u8 data[0]; /* up to 6 variable length blocks */
} __packed;
-enum mipi_gpio_pin_index {
- MIPI_GPIO_UNDEFINED = 0,
- MIPI_GPIO_PANEL_ENABLE,
- MIPI_GPIO_BL_ENABLE,
- MIPI_GPIO_PWM_ENABLE,
- MIPI_GPIO_RESET_N,
- MIPI_GPIO_PWR_DOWN_R,
- MIPI_GPIO_STDBY_RST_N,
- MIPI_GPIO_MAX
-};
-
#endif /* _INTEL_VBT_DEFS_H_ */
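Aside: the split lo/hi bitfields in struct lvds_dvo_timing above are easiest to read next to the way a parser reassembles them. The sketch below is illustrative only and not part of this patch; the helper name is hypothetical and only a subset of the mode fields is shown.

/* Hypothetical sketch: recombining lvds_dvo_timing into a display mode. */
static void sketch_fill_timing(const struct lvds_dvo_timing *t,
			       struct drm_display_mode *mode)
{
	mode->clock = t->clock * 10;	/* the field is in 10 kHz units */
	mode->hdisplay = (t->hactive_hi << 8) | t->hactive_lo;
	mode->htotal = mode->hdisplay +
		((t->hblank_hi << 8) | t->hblank_lo);
	mode->hsync_start = mode->hdisplay +
		((t->hsync_off_hi << 8) | t->hsync_off_lo);
	mode->hsync_end = mode->hsync_start +
		((t->hsync_pulse_width_hi << 8) | t->hsync_pulse_width_lo);
	mode->vdisplay = (t->vactive_hi << 8) | t->vactive_lo;
	mode->vtotal = mode->vdisplay +
		((t->vblank_hi << 8) | t->vblank_lo);
	mode->vsync_start = mode->vdisplay +
		((t->vsync_off_hi << 4) | t->vsync_off_lo);
	mode->vsync_end = mode->vsync_start +
		((t->vsync_pulse_width_hi << 4) | t->vsync_pulse_width_lo);
}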
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3f9921ba4a76..ffec807b8960 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -7,8 +7,10 @@
*/
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
new file mode 100644
index 000000000000..90d3f6017fcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_H__
+#define __INTEL_VDSC_H__
+
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_dp;
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index e0b1ec821960..e272d826210a 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -30,13 +30,15 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -248,7 +250,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->sb_lock);
+ vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
@@ -257,29 +259,7 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp;
-
- tmp = I915_READ(PIPEMISC(crtc->pipe));
-
- switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
- case PIPEMISC_DITHER_6_BPC:
- return 18;
- case PIPEMISC_DITHER_8_BPC:
- return 24;
- case PIPEMISC_DITHER_10_BPC:
- return 30;
- case PIPEMISC_DITHER_12_BPC:
- return 36;
- default:
- MISSING_CASE(tmp);
- return 0;
- }
+ vlv_flisdsi_put(dev_priv);
}
static int intel_dsi_compute_config(struct intel_encoder *encoder,
@@ -515,11 +495,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms,
* needed every time after power gate */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
@@ -1689,6 +1669,174 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
+#define NS_KHZ_RATIO 1000000
+
+#define PREPARE_CNT_MAX 0x3F
+#define EXIT_ZERO_CNT_MAX 0x3F
+#define CLK_ZERO_CNT_MAX 0xFF
+#define TRAIL_CNT_MAX 0x1F
+
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ case 2:
+ extra_byte_count = 2;
+ break;
+ case 3:
+ extra_byte_count = 4;
+ break;
+ case 4:
+ default:
+ extra_byte_count = 3;
+ break;
+ }
+
+ /* in Kbps */
+ ui_num = NS_KHZ_RATIO;
+ ui_den = intel_dsi_bitrate(intel_dsi);
+
+ tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+ ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+ /*
+ * B060
+ * LP byte clock = TLPX/ (8UI)
+ */
+ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
+
+ /* DDR clock period = 2 * UI
+ * UI(sec) = 1/(bitrate * 10^3) (bitrate is in kHz)
+ * UI(nsec) = 10^6 / bitrate
+ * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate
+ * DDR clock count = ns_value / DDR clock period
+ *
+ * For GEMINILAKE, dphy_param_reg is programmed in terms of the HS
+ * byte clock count; for other platforms, in the HS DDR clock count.
+ */
+ mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /* prepare count */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
+
+ if (prepare_cnt > PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ prepare_cnt = PREPARE_CNT_MAX;
+ }
+
+ /* exit zero count */
+ exit_zero_cnt = DIV_ROUND_UP(
+ (ths_prepare_hszero - ths_prepare_ns) * ui_den,
+ ui_num * mul
+ );
+
+ /*
+ * Exit zero count is a unified value for ths_zero and ths_exit:
+ * minimum value for ths_exit = 110 ns
+ * min (exit_zero_cnt * 2) = 110 / UI
+ * exit_zero_cnt = 55 / UI
+ */
+ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
+
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clk zero count */
+ clk_zero_cnt = DIV_ROUND_UP(
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, ui_num * mul);
+
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail count */
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
+
+ if (trail_cnt > TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+ trail_cnt = TRAIL_CNT_MAX;
+ }
+
+ /* B080 */
+ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+ clk_zero_cnt << 8 | prepare_cnt;
+
+ /*
+ * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT *
+ * mul + 10UI + Extra Byte Count
+ *
+ * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+ * Extra Byte Count is calculated according to the number of lanes.
+ * High Low Switch Count is the max of the LP to HS and
+ * HS to LP switch counts.
+ */
+ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
+
+ /* B044 */
+ /* FIXME:
+ * The comment above does not match the code */
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
+ exit_zero_cnt * mul + 10, 8);
+
+ hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+ intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+ intel_dsi->hs_to_lp_count += extra_byte_count;
+
+ /* B088 */
+ /* LP -> HS for clock lanes
+ * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+ * extra byte count
+ * 2TLPX + 1TLPX + 1 TLPX (in ns) + prepare_cnt * 2 + clk_zero_cnt *
+ * 2 (in UI) + extra byte count
+ * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt * 2 (in UI)) /
+ * 8 + extra byte count
+ */
+ intel_dsi->clk_lp_to_hs_count =
+ DIV_ROUND_UP(
+ 4 * tlpx_ui + prepare_cnt * 2 +
+ clk_zero_cnt * 2,
+ 8);
+
+ intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+ /* HS->LP for Clock Lanes
+ * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
+ * Extra byte count
+ * 2TLPX + 8UI + (trail_count * 2)(in UI) + Extra byte count
+ * In byteclks = (2 * TLPX (in UI) + trail_count * 2 + 8) / 8 +
+ * Extra byte count
+ */
+ intel_dsi->clk_hs_to_lp_count =
+ DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
+ 8);
+ intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+ intel_dsi_log_params(intel_dsi);
+}
+
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1697,7 +1845,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *current_mode, *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@@ -1741,6 +1889,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_encoder->port = port;
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ intel_encoder->cloneable = 0;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1778,6 +1929,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
+ /* Use clock read-back from current hw-state for fastboot */
+ current_mode = intel_encoder_current_mode(intel_encoder);
+ if (current_mode) {
+ DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
+ if (intel_fuzzy_clock_check(intel_dsi->pclk,
+ current_mode->clock)) {
+ DRM_DEBUG_KMS("Using GOP pclk\n");
+ intel_dsi->pclk = current_mode->clock;
+ }
+
+ kfree(current_mode);
+ }
+
+ vlv_dphy_param_init(intel_dsi);
+
/*
* In case of BYT with CRC PMIC, we need to use GPIO for
* Panel control.
@@ -1793,9 +1960,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
}
- intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
- intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -1813,7 +1977,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
- goto err;
+ goto err_cleanup_connector;
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
@@ -1823,6 +1987,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
return;
+err_cleanup_connector:
+ drm_connector_cleanup(&intel_connector->base);
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
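Aside: a worked instance of the vlv_dphy_param_init() arithmetic above, with illustrative numbers that are assumptions, not taken from any real panel:

/*
 * Assume tlpx_ns = 50 and a DSI bitrate of 500000 kbps, so
 * ui_num = 10^6 and ui_den = 500000:
 *
 *   lp_byte_clk = DIV_ROUND_UP(50 * 500000, 8 * 1000000)
 *               = DIV_ROUND_UP(25000000, 8000000) = 4
 *
 * With ths_prepare_ns = 65 and mul = 2 (a non-GEMINILAKE platform):
 *
 *   prepare_cnt = DIV_ROUND_UP(65 * 500000, 1000000 * 2)
 *               = DIV_ROUND_UP(32500000, 2000000) = 17
 *
 * which is comfortably below PREPARE_CNT_MAX (0x3F = 63).
 */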
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5e7b1fb2db5d..99cc3e2e9c2c 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -26,9 +26,11 @@
*/
#include <linux/kernel.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_sideband.h"
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
@@ -149,7 +151,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
@@ -166,11 +168,11 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
@@ -182,14 +184,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
}
bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
@@ -266,10 +268,10 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
new file mode 100644
index 000000000000..07e7b8b840ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/Makefile
@@ -0,0 +1 @@
+include $(src)/Makefile.header-test # Extra header tests
diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
new file mode 100644
index 000000000000..61e06cbb4b32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(wildcard $(src)/*.h))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
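Aside: the rule is easiest to see by example. For a header foo.h (a placeholder name) under CONFIG_DRM_I915_WERROR, the makefile generates and compiles a one-line translation unit, so every header must pull in all of its own dependencies:

/* generated header_test_foo.c, built standalone: */
#include "foo.h"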
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
new file mode 100644
index 000000000000..6ad93a09968c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -0,0 +1,139 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "gt/intel_engine.h"
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+static __always_inline u32 __busy_read_flag(u8 id)
+{
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
+}
+
+static __always_inline u32 __busy_write_id(u8 id)
+{
+ /*
+ * The uABI guarantees an active writer is also amongst the read
+ * engines. This would be true if we accessed the activity tracking
+ * under the lock, but as we perform the lookup of the object and
+ * its activity locklessly we cannot guarantee that the last_write
+ * being active implies that we have set the same engine flag from
+ * last_read - hence we always set both read and write busy for
+ * last_write.
+ */
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
+}
+
+static __always_inline unsigned int
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+{
+ const struct i915_request *rq;
+
+ /*
+ * We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
+ *
+ * Note we only report on the status of native fences.
+ */
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, const struct i915_request, fence);
+ if (i915_request_completed(rq))
+ return 0;
+
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
+}
+
+static __always_inline unsigned int
+busy_check_reader(const struct dma_fence *fence)
+{
+ return __busy_set_if_active(fence, __busy_read_flag);
+}
+
+static __always_inline unsigned int
+busy_check_writer(const struct dma_fence *fence)
+{
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_i915_gem_object *obj;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
+
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj)
+ goto out;
+
+ /*
+ * A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+ *
+ */
+retry:
+ seq = raw_read_seqcount(&obj->base.resv->seq);
+
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy =
+ busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->base.resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
+
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
+
+ args->busy |= busy_check_reader(fence);
+ }
+ }
+
+ if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
+}
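Aside: given the flag encoding above, userspace can decode drm_i915_gem_busy.busy roughly as follows. This is a hypothetical helper sketch, not uAPI added by this patch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Low 16 bits: the writer's engine class + 1 (0 when there is no writer).
 * High 16 bits: one read bit per engine class (0x10000 << class).
 */
static int busy_writer_class(uint32_t busy)
{
	return (int)(busy & 0xffff) - 1;	/* -1 when there is no writer */
}

static bool busy_read_by_class(uint32_t busy, unsigned int klass)
{
	return busy & (0x10000u << klass);
}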
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 8e74c23cbd91..5295285d5843 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,29 +1,12 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
+#include "display/intel_frontbuffer.h"
+
#include "i915_drv.h"
-#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"
static DEFINE_SPINLOCK(clflush_lock);
@@ -113,6 +96,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
{
struct clflush *clflush;
+ assert_object_held(obj);
+
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
@@ -158,13 +143,12 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
- obj->resv, NULL,
+ obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &clflush->dma);
- reservation_object_unlock(obj->resv);
+ reservation_object_add_excl_fence(obj->base.resv,
+ &clflush->dma);
i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
new file mode 100644
index 000000000000..e6c382973129
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CLFLUSH_H__
+#define __I915_GEM_CLFLUSH_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ unsigned int flags);
+#define I915_CLFLUSH_FORCE BIT(0)
+#define I915_CLFLUSH_SYNC BIT(1)
+
+#endif /* __I915_GEM_CLFLUSH_H__ */
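Aside: a hypothetical call site for the flags above, assuming the caller takes the object lock that i915_gem_clflush_object() now asserts:

/* Sketch only: force a flush and wait for it before CPU access. */
i915_gem_object_lock(obj);
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE | I915_CLFLUSH_SYNC);
i915_gem_object_unlock(obj);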
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
new file mode 100644
index 000000000000..1fdab0767a47
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#include "i915_gem_client_blt.h"
+
+#include "i915_gem_object_blt.h"
+#include "intel_drv.h"
+
+struct i915_sleeve {
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+ struct sg_table *pages;
+ struct i915_page_sizes page_sizes;
+};
+
+static int vma_set_pages(struct i915_vma *vma)
+{
+ struct i915_sleeve *sleeve = vma->private;
+
+ vma->pages = sleeve->pages;
+ vma->page_sizes = sleeve->page_sizes;
+
+ return 0;
+}
+
+static void vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ vma->pages = NULL;
+}
+
+static int vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+}
+
+static void vma_unbind(struct i915_vma *vma)
+{
+ vma->vm->vma_ops.unbind_vma(vma);
+}
+
+static const struct i915_vma_ops proxy_vma_ops = {
+ .set_pages = vma_set_pages,
+ .clear_pages = vma_clear_pages,
+ .bind_vma = vma_bind,
+ .unbind_vma = vma_unbind,
+};
+
+static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
+ struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes)
+{
+ struct i915_sleeve *sleeve;
+ struct i915_vma *vma;
+ int err;
+
+ sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
+ if (!sleeve)
+ return ERR_PTR(-ENOMEM);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_free;
+ }
+
+ vma->private = sleeve;
+ vma->ops = &proxy_vma_ops;
+
+ sleeve->vma = vma;
+ sleeve->obj = i915_gem_object_get(obj);
+ sleeve->pages = pages;
+ sleeve->page_sizes = *page_sizes;
+
+ return sleeve;
+
+err_free:
+ kfree(sleeve);
+ return ERR_PTR(err);
+}
+
+static void destroy_sleeve(struct i915_sleeve *sleeve)
+{
+ i915_gem_object_put(sleeve->obj);
+ kfree(sleeve);
+}
+
+struct clear_pages_work {
+ struct dma_fence dma;
+ struct dma_fence_cb cb;
+ struct i915_sw_fence wait;
+ struct work_struct work;
+ struct irq_work irq_work;
+ struct i915_sleeve *sleeve;
+ struct intel_context *ce;
+ u32 value;
+};
+
+static const char *clear_pages_work_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
+{
+ return "clear";
+}
+
+static void clear_pages_work_release(struct dma_fence *fence)
+{
+ struct clear_pages_work *w = container_of(fence, typeof(*w), dma);
+
+ destroy_sleeve(w->sleeve);
+
+ i915_sw_fence_fini(&w->wait);
+
+ BUILD_BUG_ON(offsetof(typeof(*w), dma));
+ dma_fence_free(&w->dma);
+}
+
+static const struct dma_fence_ops clear_pages_work_ops = {
+ .get_driver_name = clear_pages_work_driver_name,
+ .get_timeline_name = clear_pages_work_timeline_name,
+ .release = clear_pages_work_release,
+};
+
+static void clear_pages_signal_irq_worker(struct irq_work *work)
+{
+ struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
+
+ dma_fence_signal(&w->dma);
+ dma_fence_put(&w->dma);
+}
+
+static void clear_pages_dma_fence_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct clear_pages_work *w = container_of(cb, typeof(*w), cb);
+
+ if (fence->error)
+ dma_fence_set_error(&w->dma, fence->error);
+
+ /*
+ * Push the signalling of the fence into yet another worker to avoid
+ * the nightmare locking around the fence spinlock.
+ */
+ irq_work_queue(&w->irq_work);
+}
+
+static void clear_pages_worker(struct work_struct *work)
+{
+ struct clear_pages_work *w = container_of(work, typeof(*w), work);
+ struct drm_i915_private *i915 = w->ce->gem_context->i915;
+ struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct i915_vma *vma = w->sleeve->vma;
+ struct i915_request *rq;
+ int err = w->dma.error;
+
+ if (unlikely(err))
+ goto out_signal;
+
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(w->sleeve->pages);
+ obj->cache_dirty = false;
+ }
+
+ /* XXX: we need to kill this */
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_unlock;
+
+ rq = i915_request_create(w->ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* There's no way the fence has signalled */
+ if (dma_fence_add_callback(&rq->fence, &w->cb,
+ clear_pages_dma_fence_cb))
+ GEM_BUG_ON(1);
+
+ if (w->ce->engine->emit_init_breadcrumb) {
+ err = w->ce->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_request;
+ }
+
+ /* XXX: more feverish nightmares await */
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto out_request;
+
+ err = intel_emit_vma_fill_blt(rq, vma, w->value);
+out_request:
+ if (unlikely(err)) {
+ i915_request_skip(rq, err);
+ err = 0;
+ }
+
+ i915_request_add(rq);
+out_unpin:
+ i915_vma_unpin(vma);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+out_signal:
+ if (unlikely(err)) {
+ dma_fence_set_error(&w->dma, err);
+ dma_fence_signal(&w->dma);
+ dma_fence_put(&w->dma);
+ }
+}
+
+static int __i915_sw_fence_call
+clear_pages_work_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct clear_pages_work *w = container_of(fence, typeof(*w), wait);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ schedule_work(&w->work);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&w->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static DEFINE_SPINLOCK(fence_lock);
+
+/* XXX: better name please */
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes,
+ u32 value)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_gem_context *ctx = ce->gem_context;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct clear_pages_work *work;
+ struct i915_sleeve *sleeve;
+ int err;
+
+ sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ if (IS_ERR(sleeve))
+ return PTR_ERR(sleeve);
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ destroy_sleeve(sleeve);
+ return -ENOMEM;
+ }
+
+ work->value = value;
+ work->sleeve = sleeve;
+ work->ce = ce;
+
+ INIT_WORK(&work->work, clear_pages_worker);
+
+ init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
+
+ dma_fence_init(&work->dma,
+ &clear_pages_work_ops,
+ &fence_lock,
+ i915->mm.unordered_timeline,
+ 0);
+ i915_sw_fence_init(&work->wait, clear_pages_work_notify);
+
+ i915_gem_object_lock(obj);
+ err = i915_sw_fence_await_reservation(&work->wait,
+ obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP);
+ if (err < 0) {
+ dma_fence_set_error(&work->dma, err);
+ } else {
+ reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ err = 0;
+ }
+ i915_gem_object_unlock(obj);
+
+ dma_fence_get(&work->dma);
+ i915_sw_fence_commit(&work->wait);
+
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_client_blt.c"
+#endif
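Aside: the fence plumbing in this new file reads more easily as a flow; roughly, per the code above:

/*
 * i915_gem_schedule_fill_pages_blt()
 *   -> i915_sw_fence_await_reservation()        wait for prior users
 *   -> clear_pages_work_notify(FENCE_COMPLETE)
 *   -> schedule_work() -> clear_pages_worker()  emits the blt request
 *   -> clear_pages_dma_fence_cb()               on request completion
 *   -> irq_work_queue() -> clear_pages_signal_irq_worker()
 *   -> dma_fence_signal(&w->dma)                releases the object
 */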
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
new file mode 100644
index 000000000000..3dbd28c22ff5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#ifndef __I915_GEM_CLIENT_BLT_H__
+#define __I915_GEM_CLIENT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct i915_page_sizes;
+struct intel_context;
+struct sg_table;
+
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes,
+ u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dd728b26b5aa..0f2c22a3bcb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,28 +1,7 @@
/*
- * Copyright © 2011-2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <ben@bwidawsk.net>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2011-2012 Intel Corporation
*/
/*
@@ -86,16 +65,16 @@
*/
#include <linux/log2.h>
+#include <linux/nospec.h>
+
#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
+#include "gt/intel_lrc_reg.h"
+
+#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "intel_lrc_reg.h"
-#include "intel_workarounds.h"
-
-#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
-#define I915_CONTEXT_PARAM_VM 0x9
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
@@ -116,28 +95,77 @@ void i915_lut_handle_free(struct i915_lut_handle *lut)
static void lut_close(struct i915_gem_context *ctx)
{
- struct i915_lut_handle *lut, *ln;
struct radix_tree_iter iter;
void __rcu **slot;
- list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
- list_del(&lut->obj_link);
- i915_lut_handle_free(lut);
- }
- INIT_LIST_HEAD(&ctx->handles_list);
+ lockdep_assert_held(&ctx->mutex);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_lut_handle *lut;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ rcu_read_unlock();
+ i915_gem_object_lock(obj);
+ list_for_each_entry(lut, &obj->lut_list, obj_link) {
+ if (lut->ctx != ctx)
+ continue;
- radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+ if (lut->handle != iter.index)
+ continue;
- vma->open_count--;
- __i915_gem_object_release_unless_active(vma->obj);
+ list_del(&lut->obj_link);
+ break;
+ }
+ i915_gem_object_unlock(obj);
+ rcu_read_lock();
+
+ if (&lut->obj_link != &obj->lut_list) {
+ i915_lut_handle_free(lut);
+ radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+ if (atomic_dec_and_test(&vma->open_count) &&
+ !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+ i915_gem_object_put(obj);
+ }
+
+ i915_gem_object_put(obj);
}
rcu_read_unlock();
}
+static struct intel_context *
+lookup_user_engine(struct i915_gem_context *ctx,
+ unsigned long flags,
+ const struct i915_engine_class_instance *ci)
+#define LOOKUP_USER_INDEX BIT(0)
+{
+ int idx;
+
+ if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
+ return ERR_PTR(-EINVAL);
+
+ if (!i915_gem_context_user_engines(ctx)) {
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ ci->engine_class,
+ ci->engine_instance);
+ if (!engine)
+ return ERR_PTR(-EINVAL);
+
+ idx = engine->id;
+ } else {
+ idx = ci->engine_instance;
+ }
+
+ return i915_gem_context_get_engine(ctx, idx);
+}
+
static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
unsigned int max;
@@ -227,19 +255,65 @@ static void release_hw_id(struct i915_gem_context *ctx)
mutex_unlock(&i915->contexts.mutex);
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+static void __free_engines(struct i915_gem_engines *e, unsigned int count)
+{
+ while (count--) {
+ if (!e->engines[count])
+ continue;
+
+ intel_context_put(e->engines[count]);
+ }
+ kfree(e);
+}
+
+static void free_engines(struct i915_gem_engines *e)
{
- struct intel_context *it, *n;
+ __free_engines(e, e->num_engines);
+}
+
+static void free_engines_rcu(struct rcu_head *rcu)
+{
+ free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+}
+
+static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_engines *e;
+ enum intel_engine_id id;
+
+ e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ init_rcu_head(&e->rcu);
+ for_each_engine(engine, ctx->i915, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(ctx, engine);
+ if (IS_ERR(ce)) {
+ __free_engines(e, id);
+ return ERR_CAST(ce);
+ }
+
+ e->engines[id] = ce;
+ }
+ e->num_engines = id;
+
+ return e;
+}
+static void i915_gem_context_free(struct i915_gem_context *ctx)
+{
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
- i915_ppgtt_put(ctx->ppgtt);
+ if (ctx->vm)
+ i915_vm_put(ctx->vm);
- rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
- intel_context_put(it);
+ free_engines(rcu_access_pointer(ctx->engines));
+ mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
i915_timeline_put(ctx->timeline);
@@ -301,7 +375,10 @@ void i915_gem_context_release(struct kref *ref)
static void context_close(struct i915_gem_context *ctx)
{
+ mutex_lock(&ctx->mutex);
+
i915_gem_context_set_closed(ctx);
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
* This context will never again be assigned to HW, so we can
@@ -316,12 +393,12 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+ mutex_unlock(&ctx->mutex);
i915_gem_context_put(ctx);
}
static u32 default_desc_template(const struct drm_i915_private *i915,
- const struct i915_hw_ppgtt *ppgtt)
+ const struct i915_address_space *vm)
{
u32 address_mode;
u32 desc;
@@ -329,7 +406,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
+ if (vm && i915_vm_is_4lvl(vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -345,9 +422,11 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
}
static struct i915_gem_context *
-__create_context(struct drm_i915_private *dev_priv)
+__create_context(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
+ int err;
int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -355,57 +434,64 @@ __create_context(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &dev_priv->contexts.list);
- ctx->i915 = dev_priv;
+ list_add_tail(&ctx->link, &i915->contexts.list);
+ ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
- INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
- ctx->hw_contexts = RB_ROOT;
- spin_lock_init(&ctx->hw_contexts_lock);
+ mutex_init(&ctx->engines_mutex);
+ e = default_engines(ctx);
+ if (IS_ERR(e)) {
+ err = PTR_ERR(e);
+ goto err_free;
+ }
+ RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
- ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ ctx->remap_slice = ALL_L3_SLICES(i915);
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
- default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
+ default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
return ctx;
+
+err_free:
+ kfree(ctx);
+ return ERR_PTR(err);
}
-static struct i915_hw_ppgtt *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+static struct i915_address_space *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *old = ctx->ppgtt;
+ struct i915_address_space *old = ctx->vm;
- ctx->ppgtt = i915_ppgtt_get(ppgtt);
- ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+ ctx->vm = i915_vm_get(vm);
+ ctx->desc_template = default_desc_template(ctx->i915, vm);
return old;
}
static void __assign_ppgtt(struct i915_gem_context *ctx,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_address_space *vm)
{
- if (ppgtt == ctx->ppgtt)
+ if (vm == ctx->vm)
return;
- ppgtt = __set_ppgtt(ctx, ppgtt);
- if (ppgtt)
- i915_ppgtt_put(ppgtt);
+ vm = __set_ppgtt(ctx, vm);
+ if (vm)
+ i915_vm_put(vm);
}
static struct i915_gem_context *
@@ -415,8 +501,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
- ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
!HAS_EXECLISTS(dev_priv))
return ERR_PTR(-EINVAL);
@@ -429,7 +513,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
@@ -439,8 +523,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
- __assign_ppgtt(ctx, ppgtt);
- i915_ppgtt_put(ppgtt);
+ __assign_ppgtt(ctx, &ppgtt->vm);
+ i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
@@ -608,17 +692,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for_each_engine(engine, dev_priv, id)
- intel_engine_lost_context(engine);
-}
-
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -640,7 +713,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
static int vm_idr_cleanup(int id, void *p, void *data)
{
- i915_ppgtt_put(p);
+ i915_vm_put(p);
return 0;
}
@@ -650,8 +723,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
int ret;
ctx->file_priv = fpriv;
- if (ctx->ppgtt)
- ctx->ppgtt->vm.file = fpriv;
+ if (ctx->vm)
+ ctx->vm->file = fpriv;
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -706,9 +779,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
return 0;
err_ctx:
- mutex_lock(&i915->drm.struct_mutex);
context_close(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
err:
idr_destroy(&file_priv->vm_idr);
idr_destroy(&file_priv->context_idr);
@@ -721,8 +792,6 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_destroy(&file_priv->context_idr_lock);
@@ -738,7 +807,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
if (!HAS_FULL_PPGTT(i915))
@@ -765,12 +834,11 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (err)
goto err_put;
- err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
if (err < 0)
goto err_unlock;
- GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
- ppgtt->user_handle = err;
+ GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
mutex_unlock(&file_priv->vm_idr_lock);
@@ -780,7 +848,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
return err;
}
@@ -789,7 +857,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int err;
u32 id;
@@ -807,40 +875,16 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- ppgtt = idr_remove(&file_priv->vm_idr, id);
- if (ppgtt) {
- GEM_BUG_ON(ppgtt->user_handle != id);
- ppgtt->user_handle = 0;
- }
+ vm = idr_remove(&file_priv->vm_idr, id);
mutex_unlock(&file_priv->vm_idr_lock);
- if (!ppgtt)
+ if (!vm)
return -ENOENT;
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return 0;
}
-static struct i915_request *
-last_request_on_engine(struct i915_timeline *timeline,
- struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- GEM_BUG_ON(timeline == &engine->timeline);
-
- rq = i915_active_request_raw(&timeline->last_request,
- &engine->i915->drm.struct_mutex);
- if (rq && rq->engine->mask & engine->mask) {
- GEM_TRACE("last request on engine %s: %llx:%llu\n",
- engine->name, rq->fence.context, rq->fence.seqno);
- GEM_BUG_ON(rq->timeline != timeline);
- return rq;
- }
-
- return NULL;
-}
-
struct context_barrier_task {
struct i915_active base;
void (*task)(void *data);
@@ -861,14 +905,15 @@ static void cb_retire(struct i915_active *base)
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
+ bool (*skip)(struct intel_context *ce, void *data),
int (*emit)(struct i915_request *rq, void *data),
void (*task)(void *data),
void *data)
{
struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
- struct intel_context *ce, *next;
- intel_wakeref_t wakeref;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -881,21 +926,22 @@ static int context_barrier_task(struct i915_gem_context *ctx,
i915_active_init(i915, &cb->base, cb_retire);
i915_active_acquire(&cb->base);
- wakeref = intel_runtime_pm_get(i915);
- rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- if (!(engine->mask & engines))
- continue;
-
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
- engine->mask)) {
+ ce->engine->mask)) {
err = -ENXIO;
break;
}
- rq = i915_request_alloc(engine, ctx);
+ if (!(ce->engine->mask & engines))
+ continue;
+
+ if (skip && skip(ce, data))
+ continue;
+
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -911,7 +957,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- intel_runtime_pm_put(i915, wakeref);
+ i915_gem_context_unlock_engines(ctx);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -921,64 +967,14 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
- intel_engine_mask_t mask)
-{
- struct intel_engine_cs *engine;
-
- GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915->kernel_context);
-
- /* Inoperable, so presume the GPU is safely pointing into the void! */
- if (i915_terminally_wedged(i915))
- return 0;
-
- for_each_engine_masked(engine, i915, mask, mask) {
- struct intel_ring *ring;
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine, i915->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- /* Queue this switch after all other activity */
- list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
- struct i915_request *prev;
-
- prev = last_request_on_engine(ring->timeline, engine);
- if (!prev)
- continue;
-
- if (prev->gem_context == i915->kernel_context)
- continue;
-
- GEM_TRACE("add barrier on %s for %llx:%lld\n",
- engine->name,
- prev->fence.context,
- prev->fence.seqno);
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
- }
-
- i915_request_add(rq);
- }
-
- return 0;
-}
-
static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int ret;
- return -EINVAL; /* nothing to see here; please move along */
-
- if (!ctx->ppgtt)
+ if (!ctx->vm)
return -ENODEV;
/* XXX rcu acquire? */
@@ -986,54 +982,52 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret)
return ret;
- ppgtt = i915_ppgtt_get(ctx->ppgtt);
+ vm = i915_vm_get(ctx->vm);
mutex_unlock(&ctx->i915->drm.struct_mutex);
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
goto err_put;
- if (!ppgtt->user_handle) {
- ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
- GEM_BUG_ON(!ret);
- if (ret < 0)
- goto err_unlock;
+ ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
+ GEM_BUG_ON(!ret);
+ if (ret < 0)
+ goto err_unlock;
- ppgtt->user_handle = ret;
- i915_ppgtt_get(ppgtt);
- }
+ i915_vm_get(vm);
args->size = 0;
- args->value = ppgtt->user_handle;
+ args->value = ret;
ret = 0;
err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return ret;
}
static void set_ppgtt_barrier(void *data)
{
- struct i915_hw_ppgtt *old = data;
+ struct i915_address_space *old = data;
- if (INTEL_GEN(old->vm.i915) < 8)
- gen6_ppgtt_unpin_all(old);
+ if (INTEL_GEN(old->i915) < 8)
+ gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_ppgtt_put(old);
+ i915_vm_put(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+ struct i915_address_space *vm = rq->gem_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
int i;
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
+ if (i915_vm_is_4lvl(vm)) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1049,6 +1043,8 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1066,25 +1062,31 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else {
/* ppGTT is not part of the legacy context image */
- gen6_ppgtt_pin(ppgtt);
+ gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
}
return 0;
}
+static bool skip_ppgtt_update(struct intel_context *ce, void *data)
+{
+ if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
+ return !ce->state;
+ else
+ return !atomic_read(&ce->pin_count);
+}
+
static int set_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
- struct i915_hw_ppgtt *ppgtt, *old;
+ struct i915_address_space *vm, *old;
int err;
- return -EINVAL; /* nothing to see here; please move along */
-
if (args->size)
return -EINVAL;
- if (!ctx->ppgtt)
+ if (!ctx->vm)
return -ENODEV;
if (upper_32_bits(args->value))
@@ -1094,26 +1096,26 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (err)
return err;
- ppgtt = idr_find(&file_priv->vm_idr, args->value);
- if (ppgtt) {
- GEM_BUG_ON(ppgtt->user_handle != args->value);
- i915_ppgtt_get(ppgtt);
- }
+ vm = idr_find(&file_priv->vm_idr, args->value);
+ if (vm)
+ i915_vm_get(vm);
mutex_unlock(&file_priv->vm_idr_lock);
- if (!ppgtt)
+ if (!vm)
return -ENOENT;
err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (err)
goto out;
- if (ppgtt == ctx->ppgtt)
+ if (vm == ctx->vm)
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+ mutex_lock(&ctx->mutex);
lut_close(ctx);
+ mutex_unlock(&ctx->mutex);
- old = __set_ppgtt(ctx, ppgtt);
+ old = __set_ppgtt(ctx, vm);
/*
* We need to flush any requests using the current ppgtt before
@@ -1121,20 +1123,21 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
* only indirectly through the context.
*/
err = context_barrier_task(ctx, ALL_ENGINES,
+ skip_ppgtt_update,
emit_ppgtt_update,
set_ppgtt_barrier,
old);
if (err) {
- ctx->ppgtt = old;
+ ctx->vm = old;
ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
}
unlock:
mutex_unlock(&ctx->i915->drm.struct_mutex);
out:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return err;
}
@@ -1156,7 +1159,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = gen8_make_rpcs(rq->i915, &sseu);
+ *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
intel_ring_advance(rq, cs);
@@ -1166,9 +1169,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
static int
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = ce->engine->i915;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int ret;
lockdep_assert_held(&ce->pin_mutex);
@@ -1182,24 +1183,15 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (!intel_context_is_pinned(ce))
return 0;
- /* Submitting requests etc needs the hw awake. */
- wakeref = intel_runtime_pm_get(i915);
-
- rq = i915_request_alloc(ce->engine, i915->kernel_context);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto out_put;
- }
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
/* Queue this switch after all other activity by this context. */
ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
- if (ret)
- goto out_add;
-
/*
* Guarantee context image and the timeline remains pinned until the
* modifying request is retired by setting the ce activity tracker.
@@ -1207,32 +1199,29 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 * But we only need to take one pin on account of it. Or, in other
 * words, transfer the pinned ce object to the tracked active request.
*/
- if (!i915_active_request_isset(&ce->active_tracker))
- __intel_context_pin(ce);
- __i915_active_request_set(&ce->active_tracker, rq);
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ ret = i915_active_ref(&ce->active, rq->fence.context, rq);
+ if (ret)
+ goto out_add;
+
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
out_add:
i915_request_add(rq);
-out_put:
- intel_runtime_pm_put(i915, wakeref);
-
return ret;
}
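
The sequence above is a reusable idiom: allocate a request on the engine's kernel context, order it after the target context's prior activity, then take an active reference so the context image stays pinned until the request retires. A minimal sketch of that idiom in isolation; emit_update() is a hypothetical callback standing in for the hardware-specific emission, not a driver function:

/* Sketch only: queue a modifying request against a pinned context. */
static int modify_pinned_context(struct intel_context *ce,
                                 int (*emit_update)(struct i915_request *rq))
{
        struct i915_request *rq;
        int ret;

        rq = i915_request_create(ce->engine->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Serialise with all prior activity on the target context. */
        ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
        if (!ret)
                /* Keep the context image alive until rq retires. */
                ret = i915_active_ref(&ce->active, rq->fence.context, rq);
        if (!ret)
                ret = emit_update(rq);

        i915_request_add(rq);
        return ret;
}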
static int
-__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+__intel_context_reconfigure_sseu(struct intel_context *ce,
+ struct intel_sseu sseu)
{
- struct intel_context *ce;
- int ret = 0;
+ int ret;
- GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
- GEM_BUG_ON(engine->id != RCS0);
+ GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
- ce = intel_context_pin_lock(ctx, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
@@ -1243,24 +1232,23 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
ce->sseu = sseu;
unlock:
- intel_context_pin_unlock(ce);
+ intel_context_unlock_pinned(ce);
return ret;
}
static int
-i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
+ struct drm_i915_private *i915 = ce->gem_context->i915;
int ret;
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = __intel_context_reconfigure_sseu(ce, sseu);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1368,8 +1356,9 @@ static int set_sseu(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
+ struct intel_context *ce;
struct intel_sseu sseu;
+ unsigned long lookup;
int ret;
if (args->size < sizeof(user_sseu))
@@ -1382,32 +1371,427 @@ static int set_sseu(struct i915_gem_context *ctx,
sizeof(user_sseu)))
return -EFAULT;
- if (user_sseu.flags || user_sseu.rsvd)
+ if (user_sseu.rsvd)
return -EINVAL;
- engine = intel_engine_lookup_user(i915,
- user_sseu.engine.engine_class,
- user_sseu.engine.engine_instance);
- if (!engine)
+ if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
return -EINVAL;
+ lookup = 0;
+ if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
+ lookup |= LOOKUP_USER_INDEX;
+
+ ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
/* Only render engine supports RPCS configuration. */
- if (engine->class != RENDER_CLASS)
- return -ENODEV;
+ if (ce->engine->class != RENDER_CLASS) {
+ ret = -ENODEV;
+ goto out_ce;
+ }
ret = user_to_context_sseu(i915, &user_sseu, &sseu);
if (ret)
- return ret;
+ goto out_ce;
- ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
- return ret;
+ goto out_ce;
args->size = sizeof(user_sseu);
+out_ce:
+ intel_context_put(ce);
+ return ret;
+}
+
+struct set_engines {
+ struct i915_gem_context *ctx;
+ struct i915_gem_engines *engines;
+};
+
+static int
+set_engines__load_balance(struct i915_user_extension __user *base, void *data)
+{
+ struct i915_context_engines_load_balance __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_engines *set = data;
+ struct intel_engine_cs *stack[16];
+ struct intel_engine_cs **siblings;
+ struct intel_context *ce;
+ u16 num_siblings, idx;
+ unsigned int n;
+ int err;
+
+ if (!HAS_EXECLISTS(set->ctx->i915))
+ return -ENODEV;
+
+ if (USES_GUC_SUBMISSION(set->ctx->i915))
+ return -ENODEV; /* not implemented yet */
+
+ if (get_user(idx, &ext->engine_index))
+ return -EFAULT;
+
+ if (idx >= set->engines->num_engines) {
+ DRM_DEBUG("Invalid placement value, %d >= %d\n",
+ idx, set->engines->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->engines->num_engines);
+ if (set->engines->engines[idx]) {
+ DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+ return -EEXIST;
+ }
+
+ if (get_user(num_siblings, &ext->num_siblings))
+ return -EFAULT;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ err = check_user_mbz(&ext->mbz64);
+ if (err)
+ return err;
+
+ siblings = stack;
+ if (num_siblings > ARRAY_SIZE(stack)) {
+ siblings = kmalloc_array(num_siblings,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return -ENOMEM;
+ }
+
+ for (n = 0; n < num_siblings; n++) {
+ struct i915_engine_class_instance ci;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+ err = -EFAULT;
+ goto out_siblings;
+ }
+
+ siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!siblings[n]) {
+ DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ err = -EINVAL;
+ goto out_siblings;
+ }
+ }
+
+ ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_siblings;
+ }
+
+ if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
+ intel_context_put(ce);
+ err = -EEXIST;
+ goto out_siblings;
+ }
+
+out_siblings:
+ if (siblings != stack)
+ kfree(siblings);
+
+ return err;
+}
+
+static int
+set_engines__bond(struct i915_user_extension __user *base, void *data)
+{
+ struct i915_context_engines_bond __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_engines *set = data;
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *virtual;
+ struct intel_engine_cs *master;
+ u16 idx, num_bonds;
+ int err, n;
+
+ if (get_user(idx, &ext->virtual_index))
+ return -EFAULT;
+
+ if (idx >= set->engines->num_engines) {
+ DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
+ idx, set->engines->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->engines->num_engines);
+ if (!set->engines->engines[idx]) {
+ DRM_DEBUG("Invalid engine at %d\n", idx);
+ return -EINVAL;
+ }
+ virtual = set->engines->engines[idx]->engine;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+ err = check_user_mbz(&ext->mbz64[n]);
+ if (err)
+ return err;
+ }
+
+ if (copy_from_user(&ci, &ext->master, sizeof(ci)))
+ return -EFAULT;
+
+ master = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class, ci.engine_instance);
+ if (!master) {
+ DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+
+ if (get_user(num_bonds, &ext->num_bonds))
+ return -EFAULT;
+
+ for (n = 0; n < num_bonds; n++) {
+ struct intel_engine_cs *bond;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
+ return -EFAULT;
+
+ bond = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!bond) {
+ DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+
+ /*
+ * A non-virtual engine has no siblings to choose between, and
+ * a submit fence will always be directed to the one engine.
+ */
+ if (intel_engine_is_virtual(virtual)) {
+ err = intel_virtual_engine_attach_bond(virtual,
+ master,
+ bond);
+ if (err)
+ return err;
+ }
+ }
+
return 0;
}
+static const i915_user_extension_fn set_engines__extensions[] = {
+ [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
+ [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
+};
+
+static int
+set_engines(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ struct i915_context_param_engines __user *user =
+ u64_to_user_ptr(args->value);
+ struct set_engines set = { .ctx = ctx };
+ unsigned int num_engines, n;
+ u64 extensions;
+ int err;
+
+ if (!args->size) { /* switch back to legacy user_ring_map */
+ if (!i915_gem_context_user_engines(ctx))
+ return 0;
+
+ set.engines = default_engines(ctx);
+ if (IS_ERR(set.engines))
+ return PTR_ERR(set.engines);
+
+ goto replace;
+ }
+
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
+ if (args->size < sizeof(*user) ||
+ !IS_ALIGNED(args->size, sizeof(*user->engines))) {
+ DRM_DEBUG("Invalid size for engine array: %d\n",
+ args->size);
+ return -EINVAL;
+ }
+
+ /*
+ * Note that I915_EXEC_RING_MASK limits execbuf to only using the
+ * first 64 engines defined here.
+ */
+ num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
+
+ set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
+ GFP_KERNEL);
+ if (!set.engines)
+ return -ENOMEM;
+
+ init_rcu_head(&set.engines->rcu);
+ for (n = 0; n < num_engines; n++) {
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *engine;
+
+ if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
+ __free_engines(set.engines, n);
+ return -EFAULT;
+ }
+
+ if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
+ ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
+ set.engines->engines[n] = NULL;
+ continue;
+ }
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!engine) {
+ DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ __free_engines(set.engines, n);
+ return -ENOENT;
+ }
+
+ set.engines->engines[n] = intel_context_create(ctx, engine);
+ if (IS_ERR(set.engines->engines[n])) {
+ err = PTR_ERR(set.engines->engines[n]);
+ __free_engines(set.engines, n);
+ return err;
+ }
+ }
+ set.engines->num_engines = num_engines;
+
+ err = -EFAULT;
+ if (!get_user(extensions, &user->extensions))
+ err = i915_user_extensions(u64_to_user_ptr(extensions),
+ set_engines__extensions,
+ ARRAY_SIZE(set_engines__extensions),
+ &set);
+ if (err) {
+ free_engines(set.engines);
+ return err;
+ }
+
+replace:
+ mutex_lock(&ctx->engines_mutex);
+ if (args->size)
+ i915_gem_context_set_user_engines(ctx);
+ else
+ i915_gem_context_clear_user_engines(ctx);
+ rcu_swap_protected(ctx->engines, set.engines, 1);
+ mutex_unlock(&ctx->engines_mutex);
+
+ call_rcu(&set.engines->rcu, free_engines_rcu);
+
+ return 0;
+}
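
Readers traverse ctx->engines under rcu_read_lock() (or the engines_mutex), so the replacement above follows the usual RCU publish pattern: swap the pointer under the writer lock, then defer freeing of the old map past a grace period. The publish step in isolation, assuming new_engines is fully constructed:

/* Sketch of the publish step; new_engines is assumed fully built. */
mutex_lock(&ctx->engines_mutex);
rcu_swap_protected(ctx->engines, new_engines, 1);
mutex_unlock(&ctx->engines_mutex);

/* After the swap, new_engines holds the *old* map; free it once all
 * pre-existing RCU readers are done.
 */
call_rcu(&new_engines->rcu, free_engines_rcu);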
+
+static struct i915_gem_engines *
+__copy_engines(struct i915_gem_engines *e)
+{
+ struct i915_gem_engines *copy;
+ unsigned int n;
+
+ copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ init_rcu_head(&copy->rcu);
+ for (n = 0; n < e->num_engines; n++) {
+ if (e->engines[n])
+ copy->engines[n] = intel_context_get(e->engines[n]);
+ else
+ copy->engines[n] = NULL;
+ }
+ copy->num_engines = n;
+
+ return copy;
+}
+
+static int
+get_engines(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct i915_context_param_engines __user *user;
+ struct i915_gem_engines *e;
+ size_t n, count, size;
+ int err = 0;
+
+ err = mutex_lock_interruptible(&ctx->engines_mutex);
+ if (err)
+ return err;
+
+ e = NULL;
+ if (i915_gem_context_user_engines(ctx))
+ e = __copy_engines(i915_gem_context_engines(ctx));
+ mutex_unlock(&ctx->engines_mutex);
+ if (IS_ERR_OR_NULL(e)) {
+ args->size = 0;
+ return PTR_ERR_OR_ZERO(e);
+ }
+
+ count = e->num_engines;
+
+ /* Be paranoid in case we have an impedance mismatch */
+ if (!check_struct_size(user, engines, count, &size)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+ if (overflows_type(size, args->size)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (!args->size) {
+ args->size = size;
+ goto err_free;
+ }
+
+ if (args->size < size) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ user = u64_to_user_ptr(args->value);
+ if (!access_ok(user, size)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (put_user(0, &user->extensions)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct i915_engine_class_instance ci = {
+ .engine_class = I915_ENGINE_CLASS_INVALID,
+ .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
+ };
+
+ if (e->engines[n]) {
+ ci.engine_class = e->engines[n]->engine->uabi_class;
+ ci.engine_instance = e->engines[n]->engine->instance;
+ }
+
+ if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
+ err = -EFAULT;
+ goto err_free;
+ }
+ }
+
+ args->size = size;
+
+err_free:
+ free_engines(e);
+ return err;
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1481,6 +1865,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_ppgtt(fpriv, ctx, args);
break;
+ case I915_CONTEXT_PARAM_ENGINES:
+ ret = set_engines(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1509,8 +1897,229 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int clone_engines(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
+ struct i915_gem_engines *clone;
+ bool user_engines;
+ unsigned long n;
+
+ clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ if (!clone)
+ goto err_unlock;
+
+ init_rcu_head(&clone->rcu);
+ for (n = 0; n < e->num_engines; n++) {
+ struct intel_engine_cs *engine;
+
+ if (!e->engines[n]) {
+ clone->engines[n] = NULL;
+ continue;
+ }
+ engine = e->engines[n]->engine;
+
+ /*
+ * Virtual engines are singletons; they can only exist
+ * inside a single context, because they embed their
+ * HW context... As each virtual context implies a single
+ * timeline (each engine can only dequeue a single request
+ * at any time), it would be surprising for two contexts
+ * to use the same engine. So let's create a copy of
+ * the virtual engine instead.
+ */
+ if (intel_engine_is_virtual(engine))
+ clone->engines[n] =
+ intel_execlists_clone_virtual(dst, engine);
+ else
+ clone->engines[n] = intel_context_create(dst, engine);
+ if (IS_ERR_OR_NULL(clone->engines[n])) {
+ __free_engines(clone, n);
+ goto err_unlock;
+ }
+ }
+ clone->num_engines = n;
+
+ user_engines = i915_gem_context_user_engines(src);
+ i915_gem_context_unlock_engines(src);
+
+ free_engines(dst->engines);
+ RCU_INIT_POINTER(dst->engines, clone);
+ if (user_engines)
+ i915_gem_context_set_user_engines(dst);
+ else
+ i915_gem_context_clear_user_engines(dst);
+ return 0;
+
+err_unlock:
+ i915_gem_context_unlock_engines(src);
+ return -ENOMEM;
+}
+
+static int clone_flags(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ dst->user_flags = src->user_flags;
+ return 0;
+}
+
+static int clone_schedattr(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ dst->sched = src->sched;
+ return 0;
+}
+
+static int clone_sseu(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
+ struct i915_gem_engines *clone;
+ unsigned long n;
+ int err;
+
+ clone = dst->engines; /* no locking required; sole access */
+ if (e->num_engines != clone->num_engines) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (n = 0; n < e->num_engines; n++) {
+ struct intel_context *ce = e->engines[n];
+
+ if (clone->engines[n]->engine->class != ce->engine->class) {
+ /* Must have compatible engine maps! */
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ /* serialises with set_sseu */
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ goto unlock;
+
+ clone->engines[n]->sseu = ce->sseu;
+ intel_context_unlock_pinned(ce);
+ }
+
+ err = 0;
+unlock:
+ i915_gem_context_unlock_engines(src);
+ return err;
+}
+
+static int clone_timeline(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ if (src->timeline) {
+ GEM_BUG_ON(src->timeline == dst->timeline);
+
+ if (dst->timeline)
+ i915_timeline_put(dst->timeline);
+ dst->timeline = i915_timeline_get(src->timeline);
+ }
+
+ return 0;
+}
+
+static int clone_vm(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ do {
+ vm = READ_ONCE(src->vm);
+ if (!vm)
+ break;
+
+ if (!kref_get_unless_zero(&vm->ref))
+ continue;
+
+ /*
+ * This ppgtt may have been reallocated between
+ * the read and the kref, and reassigned to a third
+ * context. In order to avoid inadvertent sharing
+ * of this ppgtt with that third context (and not
+ * src), we have to confirm that we have the same
+ * ppgtt after passing through the strong memory
+ * barrier implied by a successful
+ * kref_get_unless_zero().
+ *
+ * Once we have acquired the current ppgtt of src,
+ * we no longer care if it is released from src, as
+ * it cannot be reallocated elsewhere.
+ */
+
+ if (vm == READ_ONCE(src->vm))
+ break;
+
+ i915_vm_put(vm);
+ } while (1);
+ rcu_read_unlock();
+
+ if (vm) {
+ __assign_ppgtt(dst, vm);
+ i915_vm_put(vm);
+ }
+
+ return 0;
+}
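
The loop above is an instance of a general RCU lookup pattern: take a reference with kref_get_unless_zero(), then re-read the pointer to make sure the object was not freed and reallocated in between. A generic sketch of the pattern; obj and obj_release are placeholders, not driver types:

/* Generic form of the lookup in clone_vm(). */
static struct obj *get_live_obj(struct obj __rcu **slot)
{
        struct obj *o;

        rcu_read_lock();
        do {
                o = rcu_dereference(*slot);
                if (!o)
                        break;

                if (!kref_get_unless_zero(&o->ref))
                        continue; /* being freed; reload the slot */

                if (o == rcu_access_pointer(*slot))
                        break; /* still current, reference is good */

                kref_put(&o->ref, obj_release); /* raced; try again */
        } while (1);
        rcu_read_unlock();

        return o;
}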
+
+static int create_clone(struct i915_user_extension __user *ext, void *data)
+{
+ static int (* const fn[])(struct i915_gem_context *dst,
+ struct i915_gem_context *src) = {
+#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
+ MAP(ENGINES, clone_engines),
+ MAP(FLAGS, clone_flags),
+ MAP(SCHEDATTR, clone_schedattr),
+ MAP(SSEU, clone_sseu),
+ MAP(TIMELINE, clone_timeline),
+ MAP(VM, clone_vm),
+#undef MAP
+ };
+ struct drm_i915_gem_context_create_ext_clone local;
+ const struct create_ext *arg = data;
+ struct i915_gem_context *dst = arg->ctx;
+ struct i915_gem_context *src;
+ int err, bit;
+
+ if (copy_from_user(&local, ext, sizeof(local)))
+ return -EFAULT;
+
+ BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
+ I915_CONTEXT_CLONE_UNKNOWN);
+
+ if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
+ return -EINVAL;
+
+ if (local.rsvd)
+ return -EINVAL;
+
+ rcu_read_lock();
+ src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
+ rcu_read_unlock();
+ if (!src)
+ return -ENOENT;
+
+ GEM_BUG_ON(src == dst);
+
+ for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
+ if (!(local.flags & BIT(bit)))
+ continue;
+
+ err = fn[bit](dst, src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
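
From userspace, the clone extension is chained into context creation through the extensions pointer. A hedged sketch of a caller, assuming libdrm, an open DRM fd and a valid src_ctx_id (none of this is part of the patch):

/* Userspace sketch: clone engines + VM from an existing context. */
struct drm_i915_gem_context_create_ext_clone clone = {
        .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
        .clone_id = src_ctx_id,
        .flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
};
struct drm_i915_gem_context_create_ext create = {
        .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
        .extensions = (uintptr_t)&clone,
};

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
        return -errno;
/* create.ctx_id now names the cloned context. */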
+
static const i915_user_extension_fn create_extensions[] = {
[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+ [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};
static bool client_is_banned(struct drm_i915_file_private *file_priv)
@@ -1572,9 +2181,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return 0;
err_ctx:
- mutex_lock(&dev->struct_mutex);
context_close(ext_data.ctx);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1599,10 +2206,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!ctx)
return -ENOENT;
- mutex_lock(&dev->struct_mutex);
context_close(ctx);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -1610,8 +2214,9 @@ static int get_sseu(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
struct intel_context *ce;
+ unsigned long lookup;
+ int err;
if (args->size == 0)
goto out;
@@ -1622,25 +2227,33 @@ static int get_sseu(struct i915_gem_context *ctx,
sizeof(user_sseu)))
return -EFAULT;
- if (user_sseu.flags || user_sseu.rsvd)
+ if (user_sseu.rsvd)
return -EINVAL;
- engine = intel_engine_lookup_user(ctx->i915,
- user_sseu.engine.engine_class,
- user_sseu.engine.engine_instance);
- if (!engine)
+ if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
return -EINVAL;
- ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+ lookup = 0;
+ if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
+ lookup |= LOOKUP_USER_INDEX;
+
+ ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
user_sseu.slice_mask = ce->sseu.slice_mask;
user_sseu.subslice_mask = ce->sseu.subslice_mask;
user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
- intel_context_pin_unlock(ce);
+ intel_context_unlock_pinned(ce);
+ intel_context_put(ce);
if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
sizeof(user_sseu)))
@@ -1672,8 +2285,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->ppgtt)
- args->value = ctx->ppgtt->vm.total;
+ if (ctx->vm)
+ args->value = ctx->vm->total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
@@ -1708,6 +2321,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_ppgtt(file_priv, ctx, args);
break;
+ case I915_CONTEXT_PARAM_ENGINES:
+ ret = get_engines(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1801,6 +2418,23 @@ out_unlock:
return err;
}
+/* GEM context-engines iterator: for_each_gem_engine() */
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
+{
+ const struct i915_gem_engines *e = it->engines;
+ struct intel_context *ctx;
+
+ do {
+ if (it->idx >= e->num_engines)
+ return NULL;
+
+ ctx = e->engines[it->idx++];
+ } while (!ctx);
+
+ return ctx;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 23dcb01bfd82..9691dd062f72 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,25 +1,7 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#ifndef __I915_GEM_CONTEXT_H__
@@ -27,9 +9,10 @@
#include "i915_gem_context_types.h"
+#include "gt/intel_context.h"
+
#include "i915_gem.h"
#include "i915_scheduler.h"
-#include "intel_context.h"
#include "intel_device_info.h"
struct drm_device;
@@ -111,6 +94,24 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
+static inline bool
+i915_gem_context_user_engines(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
+static inline void
+i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
+{
+ set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
+static inline void
+i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
+{
+ clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
@@ -133,17 +134,12 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
-int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask);
-
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
@@ -179,6 +175,64 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_gem_engines *
+i915_gem_context_engines(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->engines,
+ lockdep_is_held(&ctx->engines_mutex));
+}
+
+static inline struct i915_gem_engines *
+i915_gem_context_lock_engines(struct i915_gem_context *ctx)
+ __acquires(&ctx->engines_mutex)
+{
+ mutex_lock(&ctx->engines_mutex);
+ return i915_gem_context_engines(ctx);
+}
+
+static inline void
+i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
+ __releases(&ctx->engines_mutex)
+{
+ mutex_unlock(&ctx->engines_mutex);
+}
+
+static inline struct intel_context *
+i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+ return i915_gem_context_engines(ctx)->engines[idx];
+}
+
+static inline struct intel_context *
+i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+ struct intel_context *ce = ERR_PTR(-EINVAL);
+
+ rcu_read_lock(); {
+ struct i915_gem_engines *e = rcu_dereference(ctx->engines);
+ if (likely(idx < e->num_engines && e->engines[idx]))
+ ce = intel_context_get(e->engines[idx]);
+ } rcu_read_unlock();
+
+ return ce;
+}
+
+static inline void
+i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
+ struct i915_gem_engines *engines)
+{
+ GEM_BUG_ON(!engines);
+ it->engines = engines;
+ it->idx = 0;
+}
+
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
+
+#define for_each_gem_engine(ce, engines, it) \
+ for (i915_gem_engines_iter_init(&(it), (engines)); \
+ ((ce) = i915_gem_engines_iter_next(&(it)));)
+
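The iterator skips NULL slots (see i915_gem_engines_iter_next() above), so a typical traversal only has to take and release the engines lock. A usage sketch, mirroring how context_barrier_task() uses it in this patch:

struct i915_gem_engines_iter it;
struct intel_context *ce;

for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
        if (!intel_context_is_pinned(ce))
                continue;
        /* ... operate on ce ... */
}
i915_gem_context_unlock_engines(ctx);
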
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index e2ec58b10fb2..cc513410eeef 100644
--- a/drivers/gpu/drm/i915/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -17,17 +17,29 @@
#include <linux/rcupdate.h>
#include <linux/types.h>
+#include "gt/intel_context_types.h"
+
#include "i915_scheduler.h"
-#include "intel_context_types.h"
struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
-struct i915_hw_ppgtt;
+struct i915_address_space;
struct i915_timeline;
struct intel_ring;
+struct i915_gem_engines {
+ struct rcu_head rcu;
+ unsigned int num_engines;
+ struct intel_context *engines[];
+};
+
+struct i915_gem_engines_iter {
+ unsigned int idx;
+ const struct i915_gem_engines *engines;
+};
+
/**
* struct i915_gem_context - client state
*
@@ -41,10 +53,34 @@ struct i915_gem_context {
/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;
+ /**
+ * @engines: User defined engines for this context
+ *
+ * Various uAPI offer the ability to look up an
+ * index from this array to select an engine to operate on.
+ *
+ * Multiple logically distinct instances of the same engine
+ * may be defined in the array, as well as composite virtual
+ * engines.
+ *
+ * Execbuf uses the I915_EXEC_RING_MASK as an index into this
+ * array to select which HW context + engine to execute on. For
+ * the default array, the user_ring_map[] is used to translate
+ * the legacy uABI onto the appropriate index (e.g. both
+ * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
+ * context, and I915_EXEC_BSD is weird). For a user defined
+ * array, execbuf uses I915_EXEC_RING_MASK as a plain index.
+ *
+ * User defined by I915_CONTEXT_PARAM_ENGINE (when the
+ * CONTEXT_USER_ENGINES flag is set).
+ */
+ struct i915_gem_engines __rcu *engines;
+ struct mutex engines_mutex; /* guards writes to engines */
+
struct i915_timeline *timeline;
/**
- * @ppgtt: unique address space (GTT)
+ * @vm: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
* complete separation of one client from all others.
@@ -52,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
/**
* @pid: process id of creator
@@ -109,6 +145,7 @@ struct i915_gem_context {
#define CONTEXT_BANNED 0
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
+#define CONTEXT_USER_ENGINES 3
/**
* @hw_id: - unique identifier for the context
@@ -128,15 +165,10 @@ struct i915_gem_context {
atomic_t hw_id_pin_count;
struct list_head hw_id_link;
- struct list_head active_engines;
struct mutex mutex;
struct i915_sched_attr sched;
- /** hw_contexts: per-engine logical HW state */
- struct rb_root hw_contexts;
- spinlock_t hw_contexts_lock;
-
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
@@ -159,17 +191,12 @@ struct i915_gem_context {
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
- /** handles_vma: rbtree to look up our context specific obj/vma for
+ /**
+ * handles_vma: rbtree to look up our context specific obj/vma for
* the user handle. (user handles are per fd, but the binding is
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
-
- /** handles_list: reverse list of all the rbtree entries in use for
- * this context, which allows us to free all the allocations on
- * context close.
- */
- struct list_head handles_list;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 5a101a9462d8..cbf1701d3acc 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,34 +1,16 @@
/*
- * Copyright 2012 Red Hat Inc
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * SPDX-License-Identifier: MIT
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Dave Airlie <airlied@redhat.com>
+ * Copyright 2012 Red Hat Inc
*/
#include <linux/dma-buf.h>
+#include <linux/highmem.h>
#include <linux/reservation.h>
-
#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
@@ -169,7 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
int err;
@@ -177,12 +158,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
if (err)
return err;
- err = i915_mutex_lock_interruptible(dev);
+ err = i915_gem_object_lock_interruptible(obj);
if (err)
goto out;
err = i915_gem_object_set_to_cpu_domain(obj, write);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_unpin_pages(obj);
@@ -192,19 +173,18 @@ out:
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
int err;
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
- err = i915_mutex_lock_interruptible(dev);
+ err = i915_gem_object_lock_interruptible(obj);
if (err)
goto out;
err = i915_gem_object_set_to_gtt_domain(obj, false);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_unpin_pages(obj);
@@ -234,7 +214,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
- exp_info.resv = obj->resv;
+ exp_info.resv = obj->base.resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -310,7 +290,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
- obj->resv = dma_buf->resv;
+ obj->base.resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
new file mode 100644
index 000000000000..2e3ce2a69653
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -0,0 +1,796 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "display/intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
+
+static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
+{
+ /*
+ * We manually flush the CPU domain so that we can override and
+ * force the flush for the display, and perform it asynchronously.
+ */
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ if (obj->cache_dirty)
+ i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
+ obj->write_domain = 0;
+}
+
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
+{
+ if (!READ_ONCE(obj->pin_global))
+ return;
+
+ i915_gem_object_lock(obj);
+ __i915_gem_object_flush_for_display(obj);
+ i915_gem_object_unlock(obj);
+}
+
+/**
+ * Moves a single object to the WC read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
+ return 0;
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * WC domain upon first access.
+ */
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
+ mb();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
+ if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return 0;
+}
+
+/**
+ * Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
+ *
+ * After this function returns, the object will be in the new cache-level
+ * across all GTT and the contents of the backing storage will be coherent,
+ * with respect to the new cache-level. In order to keep the backing storage
+ * coherent for all users, we only allow a single cache level to be set
+ * globally on the object and prevent it from being changed whilst the
+ * hardware is reading from the object. That is if the object is currently
+ * on the scanout it will be set to uncached (or equivalent display
+ * cache coherency) and all non-MOCS GPU access will also be uncached so
+ * that all direct access to the scanout remains coherent.
+ */
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct i915_vma *vma;
+ int ret;
+
+ assert_object_held(obj);
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ /* Inspect the list of currently bound VMA and unbind any that would
+ * be invalid given the new cache-level. This is principally to
+ * catch the issue of the CS prefetch crossing page boundaries and
+ * reading an invalid PTE on older architectures.
+ */
+restart:
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ if (i915_vma_is_pinned(vma)) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (!i915_vma_is_closed(vma) &&
+ i915_gem_valid_gtt_space(vma, cache_level))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ /* As unbinding may affect other elements in the
+ * obj->vma_list (due to side-effects from retiring
+ * an active vma), play safe and restart the iterator.
+ */
+ goto restart;
+ }
+
+ /* We can reuse the existing drm_mm nodes but need to change the
+ * cache-level on the PTE. We could simply unbind them all and
+ * rebind with the correct cache-level on next use. However since
+ * we already have a valid slot, dma mapping, pages etc, we may as well
+ * rewrite the PTE in the belief that doing so tramples upon less
+ * state and so involves less work.
+ */
+ if (atomic_read(&obj->bind_count)) {
+ /* Before we change the PTE, the GPU must not be accessing it.
+ * If we wait upon the object, we know that all the bound
+ * VMA are no longer active.
+ */
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
+ /* Access to snoopable pages through the GTT is
+ * incoherent and on some machines causes a hard
+ * lockup. Relinquish the CPU mmapping to force
+ * userspace to refault in the pages and we can
+ * then double check if the GTT mapping is still
+ * valid for that pointer access.
+ */
+ i915_gem_object_release_mmap(obj);
+
+ /* As we no longer need a fence for GTT access,
+ * we can relinquish it now (and so prevent having
+ * to steal a fence from someone else on the next
+ * fence request). Note GPU activity would have
+ * dropped the fence as all snoopable access is
+ * supposed to be linear.
+ */
+ for_each_ggtt_vma(vma, obj) {
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* We either have incoherent backing store and
+ * so no GTT access or the architecture is fully
+ * coherent. In such cases, existing GTT mmaps
+ * ignore the cache bit in the PTE and we can
+ * rewrite it without confusing the GPU or having
+ * to force userspace to fault back in its mmaps.
+ */
+ }
+
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
+ vma->node.color = cache_level;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true; /* Always invalidate stale cachelines */
+
+ return 0;
+}
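
The restart label above is the standard defence when the body of a list walk can mutate the list (unbinding may retire an active vma and drop elements behind the iterator). The idiom in isolation; needs_rework() is a hypothetical predicate, not a driver helper:

/* Restart-on-mutation idiom. */
restart:
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!needs_rework(vma))
                        continue;

                ret = i915_vma_unbind(vma); /* may modify obj->vma.list */
                if (ret)
                        return ret;

                goto restart; /* iterator may now be stale */
        }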
+
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ switch (obj->cache_level) {
+ case I915_CACHE_LLC:
+ case I915_CACHE_L3_LLC:
+ args->caching = I915_CACHING_CACHED;
+ break;
+
+ case I915_CACHE_WT:
+ args->caching = I915_CACHING_DISPLAY;
+ break;
+
+ default:
+ args->caching = I915_CACHING_NONE;
+ break;
+ }
+out:
+ rcu_read_unlock();
+ return err;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret = 0;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ /*
+ * Due to a HW issue on BXT A stepping, GPU stores via a
+ * snooped mapping may leave stale data in a corresponding CPU
+ * cacheline, whereas normally such cachelines would get
+ * invalidated.
+ */
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
+ return -ENODEV;
+
+ level = I915_CACHE_LLC;
+ break;
+ case I915_CACHING_DISPLAY:
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /*
+ * The caching mode of proxy object is handled by its generator, and
+ * not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (obj->cache_level == level)
+ goto out;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret == 0) {
+ ret = i915_gem_object_set_cache_level(obj, level);
+ i915_gem_object_unlock(obj);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+out:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
+ * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
+ * (for pageflips). We only flush the caches while preparing the buffer for
+ * display; the callers are responsible for the frontbuffer flush.
+ */
+struct i915_vma *
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ const struct i915_ggtt_view *view,
+ unsigned int flags)
+{
+ struct i915_vma *vma;
+ int ret;
+
+ assert_object_held(obj);
+
+ /* Mark the global pin early so that we account for the
+ * display coherency whilst setting up the cache domains.
+ */
+ obj->pin_global++;
+
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj,
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err_unpin_global;
+ }
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers. However,
+ * it may simply be too big to fit into mappable, in which case
+ * put it anyway and hope that userspace can cope (but always first
+ * try to preserve the existing ABI).
+ */
+ vma = ERR_PTR(-ENOSPC);
+ if ((flags & PIN_MAPPABLE) == 0 &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL))
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+ flags |
+ PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma))
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ if (IS_ERR(vma))
+ goto err_unpin_global;
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ __i915_gem_object_flush_for_display(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+
+ return vma;
+
+err_unpin_global:
+ obj->pin_global--;
+ return vma;
+}
+
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ mutex_lock(&i915->ggtt.vm.mutex);
+ for_each_ggtt_vma(vma, obj) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ }
+ mutex_unlock(&i915->ggtt.vm.mutex);
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ assert_object_held(obj);
+
+ if (WARN_ON(obj->pin_global == 0))
+ return;
+
+ if (--obj->pin_global == 0)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+
+ /* Bump the LRU to try and avoid premature eviction whilst flipping */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ i915_vma_unpin(vma);
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write)
+ __start_cpu_write(obj);
+
+ return 0;
+}
+
+static inline enum fb_op_origin
+fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+{
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_i915_gem_object *obj;
+ u32 read_domains = args->read_domains;
+ u32 write_domain = args->write_domain;
+ int err;
+
+ /* Only handle setting domains to types used by the CPU. */
+ if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
+ return -EINVAL;
+
+ /*
+ * Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain && read_domains != write_domain)
+ return -EINVAL;
+
+ if (!read_domains)
+ return 0;
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains) {
+ err = 0;
+ goto out;
+ }
+
+ /*
+ * Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ goto out;
+
+ /*
+ * Proxy objects do not control access to the backing storage, ergo
+ * they cannot be used as a means to manipulate the cache domain
+ * tracking for that backing storage. The proxy object is always
+ * considered to be outside of any cache domain.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_lock_interruptible(obj);
+ if (err)
+ goto out_unpin;
+
+ if (read_domains & I915_GEM_DOMAIN_WC)
+ err = i915_gem_object_set_to_wc_domain(obj, write_domain);
+ else if (read_domains & I915_GEM_DOMAIN_GTT)
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
+ else
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ i915_gem_object_unlock(obj);
+
+ if (write_domain != 0)
+ intel_fb_obj_invalidate(obj,
+ fb_write_origin(obj, write_domain));
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
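From userspace this handler is reached via DRM_IOCTL_I915_GEM_SET_DOMAIN. A minimal sketch preparing an object for CPU writes — note write_domain must equal read_domains per the check above (fd and handle assumed to exist; error handling elided):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	struct drm_i915_gem_set_domain set = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_CPU,
		.write_domain = I915_GEM_DOMAIN_CPU,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set);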
+
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto err_unlock;
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+ /* If we're not in the cpu read domain, set ourselves into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens.
+ */
+ if (!obj->cache_dirty &&
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
+ *needs_clflush = CLFLUSH_BEFORE;
+
+out:
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
+ return ret;
+}
+
+int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto err_unlock;
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+ /* If we're not in the cpu write domain, set ourselves into the
+ * gtt write domain and manually flush cachelines (as required).
+ * This optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway.
+ */
+ if (!obj->cache_dirty) {
+ *needs_clflush |= CLFLUSH_AFTER;
+
+ /*
+ * Same trick applies to invalidate partially written
+ * cachelines read before writing.
+ */
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
+ *needs_clflush |= CLFLUSH_BEFORE;
+ }
+
+out:
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ obj->mm.dirty = true;
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
+ return ret;
+}
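Both prepare helpers return with the object lock held and the pages pinned; the expected bracket releases both through i915_gem_object_finish_access(), the renamed finish helper visible in the execbuffer hunk below. A sketch, assuming a shmem-backed object:

	unsigned int needs_clflush;
	int err;

	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		return err;

	/* ... copy into the backing pages, honouring the CLFLUSH_BEFORE
	 * and CLFLUSH_AFTER bits in needs_clflush ... */

	i915_gem_object_finish_access(obj);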
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index c83d2a195d15..5fae0e50aad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,29 +1,7 @@
/*
- * Copyright © 2008,2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Chris Wilson <chris@chris-wilson.co.uk>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008,2010 Intel Corporation
*/
#include <linux/intel-iommu.h>
@@ -34,11 +12,17 @@
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
-#include "i915_drv.h"
+#include "display/intel_frontbuffer.h"
+
+#include "gem/i915_gem_ioctls.h"
+#include "gt/intel_context.h"
+#include "gt/intel_gt_pm.h"
+
+#include "i915_gem_ioctls.h"
#include "i915_gem_clflush.h"
+#include "i915_gem_context.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -236,7 +220,8 @@ struct i915_execbuffer {
unsigned int *flags;
struct intel_engine_cs *engine; /** engine to queue the request to */
- struct i915_gem_context *ctx; /** context for building the request */
+ struct intel_context *context; /* logical state for the request */
+ struct i915_gem_context *gem_context; /** caller's context */
struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
@@ -738,9 +723,9 @@ static int eb_select_context(struct i915_execbuffer *eb)
if (unlikely(!ctx))
return -ENOENT;
- eb->ctx = ctx;
- if (ctx->ppgtt) {
- eb->vm = &ctx->ppgtt->vm;
+ eb->gem_context = ctx;
+ if (ctx->vm) {
+ eb->vm = ctx->vm;
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
} else {
eb->vm = &eb->i915->ggtt.vm;
@@ -784,7 +769,6 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
static int eb_wait_for_ring(const struct i915_execbuffer *eb)
{
- const struct intel_context *ce;
struct i915_request *rq;
int ret = 0;
@@ -794,11 +778,7 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
- ce = intel_context_lookup(eb->ctx, eb->engine);
- if (!ce || !ce->ring) /* first use, assume empty! */
- return 0;
-
- rq = __eb_wait_for_ring(ce->ring);
+ rq = __eb_wait_for_ring(eb->context->ring);
if (rq) {
mutex_unlock(&eb->i915->drm.struct_mutex);
@@ -817,15 +797,12 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
- struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+ struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
struct drm_i915_gem_object *obj;
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->ctx)))
- return -ENOENT;
-
- if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+ if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
return -EIO;
INIT_LIST_HEAD(&eb->relocs);
@@ -833,6 +810,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
batch = eb_batch_index(eb);
+ mutex_lock(&eb->gem_context->mutex);
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
+ err = -ENOENT;
+ goto err_ctx;
+ }
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -866,13 +849,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
- /* transfer ref to ctx */
- if (!vma->open_count++)
+ /* transfer ref to lut */
+ if (!atomic_fetch_inc(&vma->open_count))
i915_vma_reopen(vma);
- list_add(&lut->obj_link, &obj->lut_list);
- list_add(&lut->ctx_link, &eb->ctx->handles_list);
- lut->ctx = eb->ctx;
lut->handle = handle;
+ lut->ctx = eb->gem_context;
+
+ i915_gem_object_lock(obj);
+ list_add(&lut->obj_link, &obj->lut_list);
+ i915_gem_object_unlock(obj);
add_vma:
err = eb_add_vma(eb, i, batch, vma);
@@ -885,6 +870,8 @@ add_vma:
eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
+ mutex_unlock(&eb->gem_context->mutex);
+
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
@@ -892,6 +879,8 @@ err_obj:
i915_gem_object_put(obj);
err_vma:
eb->vma[i] = NULL;
+err_ctx:
+ mutex_unlock(&eb->gem_context->mutex);
return err;
}
@@ -1027,7 +1016,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
mb();
kunmap_atomic(vaddr);
- i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
wmb();
io_mapping_unmap_atomic((void __iomem *)vaddr);
@@ -1059,7 +1048,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
unsigned int flushes;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+ err = i915_gem_object_prepare_write(obj, &flushes);
if (err)
return ERR_PTR(err);
@@ -1096,7 +1085,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(cache, obj))
return NULL;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return ERR_PTR(err);
@@ -1185,6 +1176,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*addr = value;
}
+static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ i915_vma_lock(vma);
+
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+ obj->write_domain = 0;
+
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
struct i915_vma *vma,
unsigned int len)
@@ -1196,15 +1207,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
- obj = vma->obj;
- if (obj->cache_dirty & ~obj->cache_coherent)
- i915_gem_clflush_object(obj, 0);
- obj->write_domain = 0;
- }
-
- GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
-
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1227,13 +1229,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = i915_request_await_object(rq, vma->obj, true);
+ err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1241,14 +1243,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
batch->node.start, PAGE_SIZE,
cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
if (err)
- goto err_request;
+ goto skip_request;
+ i915_vma_lock(batch);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
err = i915_vma_move_to_active(batch, rq, 0);
- if (err)
- goto skip_request;
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1858,24 +1858,59 @@ slow:
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ struct ww_acquire_ctx acquire;
unsigned int i;
- int err;
+ int err = 0;
+
+ ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
+ struct i915_vma *vma = eb->vma[i];
+
+ err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
+ if (!err)
+ continue;
+
+ GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
+
+ if (err == -EDEADLK) {
+ GEM_BUG_ON(i == 0);
+ do {
+ int j = i - 1;
+
+ ww_mutex_unlock(&eb->vma[j]->resv->lock);
+
+ swap(eb->flags[i], eb->flags[j]);
+ swap(eb->vma[i], eb->vma[j]);
+ eb->vma[i]->exec_flags = &eb->flags[i];
+ } while (--i);
+ GEM_BUG_ON(vma != eb->vma[0]);
+ vma->exec_flags = &eb->flags[0];
+
+ err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
+ &acquire);
+ }
+ if (err)
+ break;
+ }
+ ww_acquire_done(&acquire);
+
+ while (i--) {
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
struct drm_i915_gem_object *obj = vma->obj;
+ assert_vma_held(vma);
+
if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (unlikely(!capture))
- return -ENOMEM;
-
- capture->next = eb->request->capture_list;
- capture->vma = eb->vma[i];
- eb->request->capture_list = capture;
+ if (capture) {
+ capture->next = eb->request->capture_list;
+ capture->vma = vma;
+ eb->request->capture_list = capture;
+ }
}
/*
@@ -1895,24 +1930,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
flags &= ~EXEC_OBJECT_ASYNC;
}
- if (flags & EXEC_OBJECT_ASYNC)
- continue;
-
- err = i915_request_await_object
- (eb->request, obj, flags & EXEC_OBJECT_WRITE);
- if (err)
- return err;
- }
+ if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
+ err = i915_request_await_object
+ (eb->request, obj, flags & EXEC_OBJECT_WRITE);
+ }
- for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, eb->request, flags);
- err = i915_vma_move_to_active(vma, eb->request, flags);
- if (unlikely(err)) {
- i915_request_skip(eb->request, err);
- return err;
- }
+ i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1920,12 +1946,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
i915_vma_put(vma);
}
+ ww_acquire_fini(&acquire);
+
+ if (unlikely(err))
+ goto err_skip;
+
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915);
-
return 0;
+
+err_skip:
+ i915_request_skip(eb->request, err);
+ return err;
}
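The acquire loop above follows the standard ww_mutex backoff protocol: on -EDEADLK, drop every lock already held, take the contended lock with the _slow variant, and restart acquisition. A generic two-lock sketch of the same protocol (names hypothetical; a full implementation loops until no -EDEADLK remains):

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	err = ww_mutex_lock_interruptible(&a->lock, &ctx);
	if (!err) {
		err = ww_mutex_lock_interruptible(&b->lock, &ctx);
		if (err == -EDEADLK) {
			/* Back off: release what we hold, sleep on the contended lock. */
			ww_mutex_unlock(&a->lock);
			err = ww_mutex_lock_slow_interruptible(&b->lock, &ctx);
			if (!err)
				err = ww_mutex_lock_interruptible(&a->lock, &ctx);
		}
	}
	ww_acquire_done(&ctx);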
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
@@ -2079,9 +2113,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
return file_priv->bsd_engine;
}
-#define I915_USER_RINGS (4)
-
-static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_DEFAULT] = RCS0,
[I915_EXEC_RENDER] = RCS0,
[I915_EXEC_BLT] = BCS0,
@@ -2089,31 +2121,57 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_VEBOX] = VECS0
};
-static struct intel_engine_cs *
-eb_select_engine(struct drm_i915_private *dev_priv,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
- unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
- struct intel_engine_cs *engine;
+ int err;
- if (user_ring_id > I915_USER_RINGS) {
- DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
- return NULL;
- }
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged.
+ */
+ err = i915_terminally_wedged(eb->i915);
+ if (err)
+ return err;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ err = intel_context_pin(ce);
+ if (err)
+ return err;
- if ((user_ring_id != I915_EXEC_BSD) &&
- ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ eb->engine = ce->engine;
+ eb->context = ce;
+ return 0;
+}
+
+static void eb_unpin_context(struct i915_execbuffer *eb)
+{
+ intel_context_unpin(eb->context);
+}
+
+static unsigned int
+eb_select_legacy_ring(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
+{
+ struct drm_i915_private *i915 = eb->i915;
+ unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+ if (user_ring_id != I915_EXEC_BSD &&
+ (args->flags & I915_EXEC_BSD_MASK)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
- return NULL;
+ return -1;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
- bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
+ bsd_idx = gen8_dispatch_bsd_engine(i915, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -2121,20 +2179,42 @@ eb_select_engine(struct drm_i915_private *dev_priv,
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
- return NULL;
+ return -1;
}
- engine = dev_priv->engine[_VCS(bsd_idx)];
- } else {
- engine = dev_priv->engine[user_ring_map[user_ring_id]];
+ return _VCS(bsd_idx);
}
- if (!engine) {
- DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
- return NULL;
+ if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
+ DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ return -1;
}
- return engine;
+ return user_ring_map[user_ring_id];
+}
+
+static int
+eb_select_engine(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
+{
+ struct intel_context *ce;
+ unsigned int idx;
+ int err;
+
+ if (i915_gem_context_user_engines(eb->gem_context))
+ idx = args->flags & I915_EXEC_RING_MASK;
+ else
+ idx = eb_select_legacy_ring(eb, file, args);
+
+ ce = i915_gem_context_get_engine(eb->gem_context, idx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = eb_pin_context(eb, ce);
+ intel_context_put(ce);
+
+ return err;
}
static void
@@ -2275,8 +2355,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
{
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
+ struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
- intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
@@ -2318,11 +2398,24 @@ i915_gem_do_execbuffer(struct drm_device *dev,
return -EINVAL;
}
+ if (args->flags & I915_EXEC_FENCE_SUBMIT) {
+ if (in_fence) {
+ err = -EINVAL;
+ goto err_in_fence;
+ }
+
+ exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
+ if (!exec_fence) {
+ err = -EINVAL;
+ goto err_in_fence;
+ }
+ }
+
if (args->flags & I915_EXEC_FENCE_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
err = out_fence_fd;
- goto err_in_fence;
+ goto err_exec_fence;
}
}
@@ -2336,12 +2429,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine) {
- err = -EINVAL;
- goto err_engine;
- }
-
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2349,16 +2436,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- wakeref = intel_runtime_pm_get(eb.i915);
+ intel_gt_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ err = eb_select_engine(&eb, file, args);
if (unlikely(err))
goto err_unlock;
+ err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ if (unlikely(err))
+ goto err_engine;
+
err = eb_relocate(&eb);
if (err) {
/*
@@ -2442,7 +2533,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(eb.reloc_cache.rq);
/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_create(eb.context);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
@@ -2454,6 +2545,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_request;
}
+ if (exec_fence) {
+ err = i915_request_await_execution(eb.request, exec_fence,
+ eb.engine->bond_execute);
+ if (err < 0)
+ goto err_request;
+ }
+
if (fences) {
err = await_fence_array(&eb, fences);
if (err)
@@ -2480,8 +2578,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- i915_request_add(eb.request);
add_to_client(eb.request, file);
+ i915_request_add(eb.request);
if (fences)
signal_fence_array(&eb, fences);
@@ -2503,17 +2601,20 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+err_engine:
+ eb_unpin_context(&eb);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915, wakeref);
-err_engine:
- i915_gem_context_put(eb.ctx);
+ intel_gt_pm_put(eb.i915);
+ i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
+err_exec_fence:
+ dma_fence_put(exec_fence);
err_in_fence:
dma_fence_put(in_fence);
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
new file mode 100644
index 000000000000..cf0439e6be83
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -0,0 +1,96 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+struct stub_fence {
+ struct dma_fence dma;
+ struct i915_sw_fence chain;
+};
+
+static int __i915_sw_fence_call
+stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ dma_fence_signal(&stub->dma);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&stub->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const char *stub_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *stub_timeline_name(struct dma_fence *fence)
+{
+ return "object";
+}
+
+static void stub_release(struct dma_fence *fence)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
+
+ i915_sw_fence_fini(&stub->chain);
+
+ BUILD_BUG_ON(offsetof(typeof(*stub), dma));
+ dma_fence_free(&stub->dma);
+}
+
+static const struct dma_fence_ops stub_fence_ops = {
+ .get_driver_name = stub_driver_name,
+ .get_timeline_name = stub_timeline_name,
+ .release = stub_release,
+};
+
+struct dma_fence *
+i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
+{
+ struct stub_fence *stub;
+
+ assert_object_held(obj);
+
+ stub = kmalloc(sizeof(*stub), GFP_KERNEL);
+ if (!stub)
+ return NULL;
+
+ i915_sw_fence_init(&stub->chain, stub_notify);
+ dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
+ to_i915(obj->base.dev)->mm.unordered_timeline,
+ 0);
+
+ if (i915_sw_fence_await_reservation(&stub->chain,
+ obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP) < 0)
+ goto err;
+
+ reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+
+ return &stub->dma;
+
+err:
+ stub_release(&stub->dma);
+ return NULL;
+}
+
+void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
+
+ i915_sw_fence_commit(&stub->chain);
+}
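A hedged sketch of the intended use — the stub fence is installed while the object lock is held, so that subsequent GPU submissions order against the CPU access, and i915_gem_object_unlock_fence() commits the chain to signal it (helpers as declared in i915_gem_object.h later in this patch):

	struct dma_fence *fence;

	i915_gem_object_lock(obj);
	fence = i915_gem_object_lock_fence(obj);
	if (fence) {
		/* ... CPU access that must appear ordered against GPU users ... */
		i915_gem_object_unlock_fence(obj, fence);
	}
	i915_gem_object_unlock(obj);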
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ab627ed1269c..0c41e04ab8fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,36 +1,24 @@
/*
- * Copyright © 2014-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+#include "i915_utils.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-/* convert swiotlb segment size into sensible units (pages)! */
-#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
-
static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
new file mode 100644
index 000000000000..ddc7f2a52b3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef I915_GEM_IOCTLS_H
+#define I915_GEM_IOCTLS_H
+
+struct drm_device;
+struct drm_file;
+
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
new file mode 100644
index 000000000000..391621ee3cbb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -0,0 +1,508 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/mman.h>
+#include <linux/sizes.h>
+
+#include "i915_drv.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
+#include "intel_drv.h"
+
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+ unsigned long addr, unsigned long size)
+{
+ if (vma->vm_file != filp)
+ return false;
+
+ return vma->vm_start == addr &&
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
+}
+
+/**
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example of how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver-private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_i915_gem_object *obj;
+ unsigned long addr;
+
+ if (args->flags & ~(I915_MMAP_WC))
+ return -EINVAL;
+
+ if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ addr = -ENXIO;
+ goto err;
+ }
+
+ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+ addr = -EINVAL;
+ goto err;
+ }
+
+ addr = vm_mmap(obj->base.filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
+ if (args->flags & I915_MMAP_WC) {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+ if (down_write_killable(&mm->mmap_sem)) {
+ addr = -EINTR;
+ goto err;
+ }
+ vma = find_vma(mm, addr);
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ addr = -ENOMEM;
+ up_write(&mm->mmap_sem);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
+ /* This may race, but that's ok, it only gets set */
+ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
+ }
+ i915_gem_object_put(obj);
+
+ args->addr_ptr = (u64)addr;
+ return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return addr;
+}
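The matching userspace call, sketched with a write-combining mapping (fd, handle and size assumed; error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.size = size,
		.flags = I915_MMAP_WC, /* needs PAT, see the boot_cpu_has() check */
	};
	void *ptr = NULL;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
		ptr = (void *)(uintptr_t)arg.addr_ptr;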
+
+static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
+}
+
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
+ *     be aligned and suitable for fencing, and still fit into the available
+ * mappable space left by the pinned display objects. A classic problem
+ * we called the page-fault-of-doom where we would ping-pong between
+ * two objects that could not fit inside the GTT and so the memcpy
+ * would page one object in at the expense of the other between every
+ * single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ * object is too large for the available space (or simply too large
+ * for the mappable aperture!), a view is created instead and faulted
+ * into userspace. (This view is aligned and sized appropriately for
+ * fenced access.)
+ *
+ * 2 - Recognise WC as a separate cache domain so that we can flush the
+ * delayed writes via GTT before performing direct access via WC.
+ *
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ * pagefault; swapin remains transparent.
+ *
+ * Restrictions:
+ *
+ * * snoopable objects cannot be accessed via the GTT. Doing so can cause
+ *   machine hangs on some architectures, corruption on others. An attempt to
+ *   service a GTT page fault from a snoopable object will generate a SIGBUS.
+ *
+ * * the object must be able to fit into RAM (physical memory, though not
+ * limited to the mappable aperture).
+ *
+ *
+ * Caveats:
+ *
+ * * a new GTT page fault will synchronize rendering from the GPU and flush
+ * all data to system memory. Subsequent access will not be synchronized.
+ *
+ * * all mappings are revoked on runtime device suspend.
+ *
+ * * there are only 8, 16 or 32 fence registers to share between all users
+ *   (older machines require a fence register for display and blitter access
+ * as well). Contention of the fence registers will cause the previous users
+ * to be unmapped and any new access will generate new page faults.
+ *
+ * * running out of memory while servicing a fault may generate a SIGBUS,
+ * rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+ return 3;
+}
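Userspace can query this feature set through I915_PARAM_MMAP_GTT_VERSION; a minimal sketch (fd assumed):

	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &value,
	};

	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	/* value is now 3 with this patch applied */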
+
+static inline struct i915_ggtt_view
+compute_partial_view(const struct drm_i915_gem_object *obj,
+ pgoff_t page_offset,
+ unsigned int chunk)
+{
+ struct i915_ggtt_view view;
+
+ if (i915_gem_object_is_tiled(obj))
+ chunk = roundup(chunk, tile_row_pages(obj));
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = rounddown(page_offset, chunk);
+ view.partial.size =
+ min_t(unsigned int, chunk,
+ (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
+
+ /* If the partial covers the entire object, just create a normal VMA. */
+ if (chunk >= obj->base.size >> PAGE_SHIFT)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ return view;
+}
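A worked example of the chunk arithmetic with 4 KiB pages, where MIN_CHUNK_PAGES is SZ_1M >> PAGE_SHIFT = 256: a fault at page_offset 1000 into an untiled 16 MiB (4096-page) object gives

	view.partial.offset = rounddown(1000, 256) = 768
	view.partial.size   = min(256, 4096 - 768) = 256

i.e. a 1 MiB view covering pages 768-1023; since 256 < 4096 the final test keeps it a partial view rather than promoting it to a normal VMA.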
+
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace. The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room. So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
+ */
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+{
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
+ struct vm_area_struct *area = vmf->vma;
+ struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ bool write = area->vm_flags & VM_WRITE;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ pgoff_t page_offset;
+ int srcu;
+ int ret;
+
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ wakeref = intel_runtime_pm_get(rpm);
+
+ srcu = i915_reset_trylock(i915);
+ if (srcu < 0) {
+ ret = srcu;
+ goto err_rpm;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err_reset;
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* Now pin it into the GTT as needed */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
+ if (IS_ERR(vma)) {
+ /* Use a partial view if it is bigger than available space */
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+ unsigned int flags;
+
+ flags = PIN_MAPPABLE;
+ if (view.type == I915_GGTT_VIEW_NORMAL)
+ flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+ /*
+ * Userspace is now writing through an untracked VMA, abandon
+ * all hope that the hardware is able to track future writes.
+ */
+ obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ if (IS_ERR(vma) && !view.type) {
+ flags = PIN_MAPPABLE;
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ }
+ }
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unlock;
+ }
+
+ ret = i915_vma_pin_fence(vma);
+ if (ret)
+ goto err_unpin;
+
+ /* Finally, remap it using the new GTT offset */
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->iomap);
+ if (ret)
+ goto err_fence;
+
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(rpm);
+ if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+ list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+ if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+ GEM_BUG_ON(!obj->userfault_count);
+
+ i915_vma_set_ggtt_write(vma);
+
+err_fence:
+ i915_vma_unpin_fence(vma);
+err_unpin:
+ __i915_vma_unpin(vma);
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+err_reset:
+ i915_reset_unlock(i915, srcu);
+err_rpm:
+ intel_runtime_pm_put(rpm, wakeref);
+ i915_gem_object_unpin_pages(obj);
+err:
+ switch (ret) {
+ case -EIO:
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(i915))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
+ case -EAGAIN:
+ /*
+ * EAGAIN means the gpu is hung and we'll wait for the error
+ * handler to reset everything when re-faulting in
+ * i915_mutex_lock_interruptible.
+ */
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -ENOSPC:
+ case -EFAULT:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!obj->userfault_count);
+
+ obj->userfault_count = 0;
+ list_del(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ for_each_ggtt_vma(vma, obj)
+ i915_vma_unset_userfault(vma);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ /* Serialisation between user GTT access and our code depends upon
+ * revoking the CPU's PTE whilst the mutex is held. The next user
+ * pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates matters somewhat by adding the additional
+ * requirement that operations to the GGTT be made while holding the
+ * RPM wakeref.
+ */
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (!obj->userfault_count)
+ goto out;
+
+ __i915_gem_object_release_mmap(obj);
+
+ /* Ensure that the CPU's PTEs are revoked and there are no outstanding
+ * memory transactions from userspace before we return. The TLB
+ * flushing implied above by changing the PTE above *should* be
+ * sufficient, an extra barrier here just provides us with a bit
+ * of paranoid documentation about our requirement to serialise
+ * memory writes before touching registers / GSM.
+ */
+ wmb();
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int err;
+
+ err = drm_gem_create_mmap_offset(&obj->base);
+ if (likely(!err))
+ return 0;
+
+ /* Attempt to reap some mmap space from dead objects */
+ do {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ break;
+
+ i915_gem_drain_freed_objects(i915);
+ err = drm_gem_create_mmap_offset(&obj->base);
+ if (!err)
+ break;
+
+ } while (flush_delayed_work(&i915->gem.retire_work));
+
+ return err;
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ u64 *offset)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = create_mmap_offset(obj);
+ if (ret == 0)
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
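The two-step flow from userspace, sketched (fd, handle and size assumed; error handling elided):

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr = MAP_FAILED;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, arg.offset);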
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_mman.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
new file mode 100644
index 000000000000..be6caccce0c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "display/intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+ struct i915_global base;
+ struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ return kmem_cache_free(global.slab_objects, obj);
+}
+
+static void
+frontbuffer_retire(struct i915_active_request *active,
+ struct i915_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, ORIGIN_CS);
+}
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ mutex_init(&obj->mm.lock);
+
+ spin_lock_init(&obj->vma.lock);
+ INIT_LIST_HEAD(&obj->vma.list);
+
+ INIT_LIST_HEAD(&obj->lut_list);
+ INIT_LIST_HEAD(&obj->batch_pool_link);
+
+ init_rcu_head(&obj->rcu);
+
+ obj->ops = ops;
+
+ obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+ i915_active_request_init(&obj->frontbuffer_write,
+ NULL, frontbuffer_retire);
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
+}
+
+/**
+ * Mark up the object's coherency levels for a given cache_level
+ * @obj: #drm_i915_gem_object
+ * @cache_level: cache level
+ */
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level)
+{
+ obj->cache_level = cache_level;
+
+ if (cache_level != I915_CACHE_NONE)
+ obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
+ I915_BO_CACHE_COHERENT_FOR_WRITE);
+ else if (HAS_LLC(to_i915(obj->base.dev)))
+ obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
+ else
+ obj->cache_coherent = 0;
+
+ obj->cache_dirty =
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem);
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_lut_handle *lut, *ln;
+ LIST_HEAD(close);
+
+ i915_gem_object_lock(obj);
+ list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
+ struct i915_gem_context *ctx = lut->ctx;
+
+ if (ctx->file_priv != fpriv)
+ continue;
+
+ i915_gem_context_get(ctx);
+ list_move(&lut->obj_link, &close);
+ }
+ i915_gem_object_unlock(obj);
+
+ list_for_each_entry_safe(lut, ln, &close, obj_link) {
+ struct i915_gem_context *ctx = lut->ctx;
+ struct i915_vma *vma;
+
+ /*
+ * We allow the process to have multiple handles to the same
+ * vma, in the same fd namespace, by virtue of flink/open.
+ */
+
+ mutex_lock(&ctx->mutex);
+ vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
+ if (vma) {
+ GEM_BUG_ON(vma->obj != obj);
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
+ if (atomic_dec_and_test(&vma->open_count) &&
+ !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+ }
+ mutex_unlock(&ctx->mutex);
+
+ i915_gem_context_put(lut->ctx);
+ i915_lut_handle_free(lut);
+ i915_gem_object_put(obj);
+ }
+}
+
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
+{
+ struct drm_i915_gem_object *obj, *on;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ struct i915_vma *vma, *vn;
+
+ trace_i915_gem_object_destroy(obj);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_destroy(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma.list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
+
+ /*
+ * This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ if (i915_gem_object_has_pages(obj) &&
+ i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_del_init(&obj->mm.link);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+ GEM_BUG_ON(obj->userfault_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ drm_gem_object_release(&obj->base);
+
+ bitmap_free(obj->bit_17);
+ i915_gem_object_free(obj);
+
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+
+ cond_resched();
+ }
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ /* Free the oldest, most stale object to keep the free_list short */
+ freed = NULL;
+ if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+ /* Only one consumer of llist_del_first() allowed */
+ spin_lock(&i915->mm.free_lock);
+ freed = llist_del_first(&i915->mm.free_list);
+ spin_unlock(&i915->mm.free_lock);
+ }
+ if (unlikely(freed)) {
+ freed->next = NULL;
+ __i915_gem_free_objects(i915, freed);
+ }
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
+
+ /*
+ * All file-owned VMA should have been released by this point through
+ * i915_gem_close_object(), or earlier by i915_gem_context_close().
+ * However, the object may also be bound into the global GTT (e.g.
+ * older GPUs without per-process support, or for direct access through
+ * the GTT either for the user or for scanout). Those VMA still need to
+ * be unbound now.
+ */
+
+ spin_lock(&i915->mm.free_lock);
+ while ((freed = llist_del_all(&i915->mm.free_list))) {
+ spin_unlock(&i915->mm.free_lock);
+
+ __i915_gem_free_objects(i915, freed);
+ if (need_resched())
+ return;
+
+ spin_lock(&i915->mm.free_lock);
+ }
+ spin_unlock(&i915->mm.free_lock);
+}
+
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ /*
+ * We reuse obj->rcu for the freed list, so we had better not treat
+ * it like a rcu_head from this point forwards. And we expect all
+ * objects to be freed via this path.
+ */
+ destroy_rcu_head(&obj->rcu);
+
+ /*
+ * Since we require blocking on struct_mutex to unbind the freed
+ * object from the GPU before releasing resources back to the
+ * system, we can not do that directly from the RCU callback (which may
+ * be a softirq context), but must instead defer that work onto a
+ * kthread. We use the RCU callback rather than move the freed object
+ * directly onto the work queue so that we can mix between using the
+ * worker and performing frees directly from subsequent allocations for
+ * crude but effective memory throttling.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ queue_work(i915->wq, &i915->mm.free_work);
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ /*
+ * Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
+
+static inline enum fb_op_origin
+fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+{
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+}
+
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+void
+i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
+ unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ i915_gem_flush_ggtt_writes(dev_priv);
+
+ intel_fb_obj_flush(obj,
+ fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+ for_each_ggtt_vma(vma, obj) {
+ if (vma->iomap)
+ continue;
+
+ i915_vma_unset_ggtt_write(vma);
+ }
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
+void i915_gem_init__objects(struct drm_i915_private *i915)
+{
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+}
+
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
+#include "selftests/i915_gem_object.c"
+#include "selftests/i915_gem_coherency.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
new file mode 100644
index 000000000000..dfebd5706f16
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -0,0 +1,430 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_gem_object_types.h"
+
+#include "i915_gem_gtt.h"
+
+void i915_gem_init__objects(struct drm_i915_private *i915);
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size);
+
+extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush);
+
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
+
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
+void i915_gem_free_object(struct drm_gem_object *obj);
+
+void i915_gem_flush_free_objects(struct drm_i915_private *i915);
+
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @file, NULL
+ * otherwise. The object is only valid while the RCU read lock is held, and
+ * note carefully that it may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
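+
+/*
+ * i915_gem_object_lookup() above is the common pattern: take the RCU read
+ * lock, skip objects whose final reference has already been dropped
+ * (kref_get_unless_zero() fails), and return a full reference that the
+ * caller must release with i915_gem_object_put().
+ */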
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_get(&obj->base);
+ return obj;
+}
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_put(&obj->base);
+}
+
+#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+
+static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+{
+ reservation_object_lock(obj->base.resv, NULL);
+}
+
+static inline int
+i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
+{
+ return reservation_object_lock_interruptible(obj->base.resv, NULL);
+}
+
+static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
+{
+ reservation_object_unlock(obj->base.resv);
+}
+
+struct dma_fence *
+i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence);
+
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+}
+
+static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return READ_ONCE(obj->active_count);
+}
+
+static inline bool
+i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+{
+ return READ_ONCE(obj->framebuffer_references);
+}
+
+static inline unsigned int
+i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline unsigned int
+i915_gem_tile_height(unsigned int tiling)
+{
+ GEM_BUG_ON(!tiling);
+ return tiling == I915_TILING_Y ? 32 : 8;
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
+{
+ return (i915_gem_object_get_stride(obj) *
+ i915_gem_object_get_tile_height(obj));
+}
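+
+/*
+ * A worked example with illustrative numbers: an X-tiled object with a
+ * 512 byte stride has a tile height of 8 rows, so one tile row covers
+ * 512 * 8 = 4096 bytes; with Y-tiling (32 rows) the same stride covers
+ * 512 * 32 = 16384 bytes.
+ */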
+
+int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride);
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ unsigned int sg_page_sizes);
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ might_lock(&obj->mm.lock);
+
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
+
+ return __i915_gem_object_get_pages(obj);
+}
+
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+ atomic_inc(&obj->mm.pages_pin_count);
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ atomic_dec(&obj->mm.pages_pin_count);
+}
+
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque paths */
+};
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
+
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
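+
+/*
+ * The FORCE_* variants carry I915_MAP_OVERRIDE, which allows a cached
+ * mapping of the other type to be discarded and recreated; without it,
+ * requesting a type that mismatches an existing pinned mapping fails
+ * with -EBUSY (see i915_gem_object_pin_map()).
+ */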
+
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
+ *
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
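+
+/*
+ * A minimal usage sketch, assuming a shmem-backed object and a caller
+ * that may sleep (obj, data and len are the caller's):
+ *
+ * void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ * if (IS_ERR(vaddr))
+ * return PTR_ERR(vaddr);
+ * memcpy(vaddr, data, len);
+ * i915_gem_object_flush_map(obj);
+ * i915_gem_object_unpin_map(obj);
+ */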
+
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
+
+void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+
+void
+i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
+ unsigned int flush_domains);
+
+int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE BIT(0)
+#define CLFLUSH_AFTER BIT(1)
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unlock(obj);
+}
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level);
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+
+int __must_check
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+struct i915_vma * __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ const struct i915_ggtt_view *view,
+ unsigned int flags);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+
+static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ if (obj->cache_dirty)
+ return false;
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
+ return obj->pin_global; /* currently in use by HW, keep flushed */
+}
+
+static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
+{
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ if (cpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+}
+
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ const struct i915_sched_attr *attr);
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
new file mode 100644
index 000000000000..cb42e3a312e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_gem_object_blt.h"
+
+#include "i915_gem_clflush.h"
+#include "intel_drv.h"
+
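+/*
+ * The fill is emitted as a single XY_COLOR_BLT: the vma is described to
+ * the blitter as a 32bpp rectangle one page wide (pitch of PAGE_SIZE
+ * bytes, i.e. PAGE_SIZE / 4 pixels per row) and vma->size >> PAGE_SHIFT
+ * rows tall, packed as height << 16 | width below. Gen8+ takes a 64bit
+ * surface address, hence the extra dword and the (7 - 2) length field.
+ */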
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+ struct i915_vma *vma,
+ u32 value)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ } else {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = vma->node.start;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ u32 value)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_gem_context *ctx = ce->gem_context;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ /* XXX: ce->vm please */
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (unlikely(err))
+ return err;
+
+ if (obj->cache_dirty & ~obj->cache_coherent) {
+ i915_gem_object_lock(obj);
+ i915_gem_clflush_object(obj, 0);
+ i915_gem_object_unlock(obj);
+ }
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ err = i915_request_await_object(rq, obj, true);
+ if (unlikely(err))
+ goto out_request;
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_request;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (unlikely(err))
+ goto out_request;
+
+ err = intel_emit_vma_fill_blt(rq, vma, value);
+out_request:
+ if (unlikely(err))
+ i915_request_skip(rq, err);
+
+ i915_request_add(rq);
+out_unpin:
+ i915_vma_unpin(vma);
+ return err;
+}
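+
+/*
+ * Note the fill is merely queued here; i915_request_add() returns as
+ * soon as the request is submitted. A caller that needs the contents
+ * must wait on the object (e.g. i915_gem_object_wait()) before
+ * depending on the result.
+ */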
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_object_blt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
new file mode 100644
index 000000000000..7ec7de6ac0c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_BLT_H__
+#define __I915_GEM_OBJECT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct intel_context;
+struct i915_request;
+struct i915_vma;
+
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+ struct i915_vma *vma,
+ u32 value);
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
new file mode 100644
index 000000000000..18bf4f8d6d80
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -0,0 +1,262 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_TYPES_H__
+#define __I915_GEM_OBJECT_TYPES_H__
+
+#include <drm/drm_gem.h>
+
+#include "i915_active.h"
+#include "i915_selftest.h"
+
+struct drm_i915_gem_object;
+
+/*
+ * struct i915_lut_handle tracks the fast lookups from handle to vma used
+ * for execbuf. Although we use a radixtree for that mapping, in order to
+ * remove them as the object or context is closed, we need a secondary list
+ * and a translation entry (i915_lut_handle).
+ */
+struct i915_lut_handle {
+ struct list_head obj_link;
+ struct i915_gem_context *ctx;
+ u32 handle;
+};
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages, e.g. before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *obj);
+ void (*put_pages)(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+ void (*truncate)(struct drm_i915_gem_object *obj);
+ void (*writeback)(struct drm_i915_gem_object *obj);
+
+ int (*pwrite)(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *obj);
+ void (*release)(struct drm_i915_gem_object *obj);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ struct {
+ /**
+ * @vma.lock: protect the list/tree of vmas
+ */
+ spinlock_t lock;
+
+ /**
+ * @vma.list: List of VMAs backed by this object
+ *
+ * The VMA on this list are ordered by type: all GGTT vma are
+ * placed at the head and all ppGTT vma are placed at the tail.
+ * The different types of GGTT vma are unordered between
+ * themselves; use the @vma.tree (which has a defined order
+ * between all VMA) to quickly find an exact match.
+ */
+ struct list_head list;
+
+ /**
+ * @vma.tree: Ordered tree of VMAs backed by this object
+ *
+ * All VMA created for this object are placed in the @vma.tree
+ * for fast retrieval via a binary search in
+ * i915_vma_instance(). They are also added to @vma.list for
+ * easy iteration.
+ */
+ struct rb_root tree;
+ } vma;
+
+ /**
+ * @lut_list: List of vma lookup entries in use for this object.
+ *
+ * If this object is closed, we need to remove all of its VMA from
+ * the fast lookup index in associated contexts; @lut_list provides
+ * this translation from object to context->handles_vma.
+ */
+ struct list_head lut_list;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ unsigned int userfault_count;
+ struct list_head userfault_link;
+
+ struct list_head batch_pool_link;
+ I915_SELFTEST_DECLARE(struct list_head st_link);
+
+ /* The caching mode (I915_CACHE_*) the object is mapped with. */
+ unsigned int cache_level:3;
+ unsigned int cache_coherent:2;
+#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
+#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
+ unsigned int cache_dirty:1;
+
+ /**
+ * @read_domains: Read memory domains.
+ *
+ * These monitor which caches contain read/write data related to the
+ * object. When transitioning from one set of domains to another,
+ * the driver is called to ensure that caches are suitably flushed and
+ * invalidated.
+ */
+ u16 read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ u16 write_domain;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_active_request frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ atomic_t bind_count;
+ unsigned int active_count;
+ /** Count of how many global VMA are currently pinned for use by HW */
+ unsigned int pin_global;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ /* TODO: whack some of this into the error state */
+ struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table, i.e. the mask of
+ * the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+
+ /**
+ * The actual gtt page size usage. Since we can have
+ * multiple vma associated with this object we need to
+ * prevent any trampling of state, hence a copy of this
+ * struct also lives in each vma; the gtt value here
+ * should therefore only be read/written through the vma.
+ */
+ unsigned int gtt;
+ } page_sizes;
+
+ I915_SELFTEST_DECLARE(unsigned int page_mask);
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * locked by i915->mm.obj_lock.
+ */
+ struct list_head link;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned int framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ unsigned long scratch;
+
+ void *gvt_info;
+ };
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
new file mode 100644
index 000000000000..b36ad269f4ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -0,0 +1,544 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ unsigned int sg_page_sizes)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ int i;
+
+ lockdep_assert_held(&obj->mm.lock);
+
+ /* Make the pages coherent with the GPU (flushing any swapin). */
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(pages);
+ obj->cache_dirty = false;
+ }
+
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+
+ GEM_BUG_ON(!sg_page_sizes);
+ obj->mm.page_sizes.phys = sg_page_sizes;
+
+ /*
+ * Calculate the supported page-sizes which fit into the given
+ * sg_page_sizes. This will give us the page-sizes which we may be able
+ * to use opportunistically when later inserting into the GTT. For
+ * example if phys=2G, then in theory we should be able to use 1G, 2M,
+ * 64K or 4K pages, although in practice this will depend on a number of
+ * other factors.
+ */
+ obj->mm.page_sizes.sg = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ if (obj->mm.page_sizes.phys & ~0u << i)
+ obj->mm.page_sizes.sg |= BIT(i);
+ }
+ GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ struct list_head *list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ i915->mm.shrink_count++;
+ i915->mm.shrink_memory += obj->base.size;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &i915->mm.purge_list;
+ else
+ list = &i915->mm.shrink_list;
+ list_add_tail(&obj->mm.link, list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
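+
+/*
+ * To make the page-size mask above concrete: with supported GTT page
+ * sizes of 2M | 64K | 4K and a mixed sg_page_sizes of 64K | 4K, a bit
+ * is set in page_sizes.sg whenever some sg entry is at least that
+ * large, yielding 64K | 4K but not 2M (no entry reaches 2M).
+ */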
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ err = obj->ops->get_pages(obj);
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
+
+ return err;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times; each call must be balanced by a matching
+ * i915_gem_object_unpin_pages(). The pages are only released once they are
+ * no longer referenced, either as a result of memory pressure (reaping
+ * pages under the shrinker) or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
+
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
+
+/* Immediately discard the backing storage */
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+ drm_gem_free_mmap_offset(&obj->base);
+ if (obj->ops->truncate)
+ obj->ops->truncate(obj);
+}
+
+/* Try to discard unwanted pages */
+void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+ if (obj->ops->writeback)
+ obj->ops->writeback(obj);
+}
+
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ rcu_read_unlock();
+}
+
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *pages;
+
+ pages = fetch_and_zero(&obj->mm.pages);
+ if (IS_ERR_OR_NULL(pages))
+ return pages;
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ list_del(&obj->mm.link);
+ i915->mm.shrink_count--;
+ i915->mm.shrink_memory -= obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+
+ if (obj->mm.mapping) {
+ void *ptr;
+
+ ptr = page_mask_bits(obj->mm.mapping);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ obj->mm.mapping = NULL;
+ }
+
+ __i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+ int err;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return -EBUSY;
+
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
+
+ /*
+ * XXX Temporary hijinx to avoid updating all backends to handle
+ * NULL pages. In the future, when we have more asynchronous
+ * get_pages backends we should be better able to handle the
+ * cancellation of the async task in a more uniform manner.
+ */
+ if (!pages && !i915_gem_object_needs_async_cancel(obj))
+ pages = ERR_PTR(-EINVAL);
+
+ if (!IS_ERR(pages))
+ obj->ops->put_pages(obj, pages);
+
+ err = 0;
+unlock:
+ mutex_unlock(&obj->mm.lock);
+
+ return err;
+}
+
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->mm.pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ pgprot_t pgprot;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1 && type == I915_MAP_WB)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ switch (type) {
+ default:
+ MISSING_CASE(type);
+ /* fallthrough to use PAGE_KERNEL anyway */
+ case I915_MAP_WB:
+ pgprot = PAGE_KERNEL;
+ break;
+ case I915_MAP_WC:
+ pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+ break;
+ }
+ addr = vmap(pages, n_pages, 0, pgprot);
+
+ if (pages != stack_pages)
+ kvfree(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ enum i915_map_type has_type;
+ bool pinned;
+ void *ptr;
+ int err;
+
+ if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ return ERR_PTR(-ENXIO);
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return ERR_PTR(err);
+
+ pinned = !(type & I915_MAP_OVERRIDE);
+ type &= ~I915_MAP_OVERRIDE;
+
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr && has_type != type) {
+ if (pinned) {
+ err = -EBUSY;
+ goto err_unpin;
+ }
+
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ ptr = obj->mm.mapping = NULL;
+ }
+
+ if (!ptr) {
+ ptr = i915_gem_object_map(obj, type);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto err_unpin;
+ }
+
+ obj->mm.mapping = page_pack_bits(ptr, type);
+ }
+
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return ptr;
+
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+}
+
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size)
+{
+ enum i915_map_type has_type;
+ void *ptr;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+ offset, size, obj->base.size));
+
+ obj->mm.dirty = true;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+ return;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (has_type == I915_MAP_WC)
+ return;
+
+ drm_clflush_virt_range(ptr + offset, size);
+ if (size == obj->base.size) {
+ obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+ obj->cache_dirty = false;
+ }
+}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookup of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ void *entry;
+ unsigned long i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ entry = xa_mk_value(idx);
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i, entry);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radix tree will contain a value entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(xa_is_value(sg))) {
+ unsigned long base = xa_to_value(sg);
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
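+
+/*
+ * An illustrative lookup: with sg entries spanning pages [0..3], [4]
+ * and [5..12], i915_gem_object_get_sg(obj, 7, &offset) returns the
+ * third entry with *offset = 2; indices 6..12 are cached as value
+ * entries pointing back at base index 5, keeping repeated lookups
+ * cheap.
+ */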
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
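+
+ /*
+ * If the whole object is already tracked as dirty, every page will
+ * be written back when the pages are released (see the put_pages
+ * backends); only flag the individual page while the object is
+ * still considered clean.
+ */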
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+
+ if (len)
+ *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
+
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ return i915_gem_object_get_dma_address_len(obj, n, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
new file mode 100644
index 000000000000..2deac933cf59
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -0,0 +1,212 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+#include <linux/shmem_fs.h>
+#include <linux/swap.h>
+
+#include <drm/drm.h> /* for drm_legacy.h! */
+#include <drm/drm_cache.h>
+#include <drm/drm_legacy.h> /* for drm_pci.h! */
+#include <drm/drm_pci.h>
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ struct drm_dma_handle *phys;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ char *vaddr;
+ int i;
+ int err;
+
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
+
+ /* Always aligning to the object size allows a single allocation
+ * to handle all possible callers; given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ roundup_pow_of_two(obj->base.size),
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return -ENOMEM;
+
+ vaddr = phys->vaddr;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto err_phys;
+ }
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ put_page(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st) {
+ err = -ENOMEM;
+ goto err_phys;
+ }
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ err = -ENOMEM;
+ goto err_phys;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_dma_address(sg) = phys->busaddr;
+ sg_dma_len(sg) = obj->base.size;
+
+ obj->phys_handle = phys;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
+
+ return err;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj, pages, false);
+
+ if (obj->mm.dirty) {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char *vaddr = obj->phys_handle->vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *dst;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ continue;
+
+ dst = kmap_atomic(page);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+ put_page(page);
+ vaddr += PAGE_SIZE;
+ }
+ obj->mm.dirty = false;
+ }
+
+ sg_free_table(pages);
+ kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .get_pages = i915_gem_object_get_pages_phys,
+ .put_pages = i915_gem_object_put_pages_phys,
+ .release = i915_gem_object_release_phys,
+};
+
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+{
+ struct sg_table *pages;
+ int err;
+
+ if (align > obj->base.size)
+ return -EINVAL;
+
+ if (obj->ops == &i915_gem_phys_ops)
+ return 0;
+
+ if (obj->ops != &i915_gem_shmem_ops)
+ return -EINVAL;
+
+ err = i915_gem_object_unbind(obj);
+ if (err)
+ return err;
+
+ mutex_lock(&obj->mm.lock);
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.quirked) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.mapping) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ pages = __i915_gem_object_unset_pages(obj);
+
+ obj->ops = &i915_gem_phys_ops;
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto err_xfer;
+
+ /* Perma-pin (until release) the physical set of pages */
+ __i915_gem_object_pin_pages(obj);
+
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_shmem_ops.put_pages(obj, pages);
+ mutex_unlock(&obj->mm.lock);
+ return 0;
+
+err_xfer:
+ obj->ops = &i915_gem_shmem_ops;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
+err_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
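+
+/*
+ * Swapping in the phys backend perma-pins a physically contiguous copy
+ * of the shmem contents; this is intended for hardware that can only
+ * scan out of physically contiguous memory, e.g. cursors on the oldest
+ * platforms.
+ */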
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_phys.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
new file mode 100644
index 000000000000..05011d4a3b88
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -0,0 +1,294 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt_pm.h"
+
+#include "i915_drv.h"
+#include "i915_globals.h"
+
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct i915_active_request *active =
+ container_of((struct list_head *)node,
+ typeof(*active), link);
+
+ INIT_LIST_HEAD(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+ active->retire(active, NULL);
+ }
+}
+
+static void i915_gem_park(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ call_idle_barriers(engine); /* cleanup after wedging */
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+ }
+
+ i915_timelines_park(i915);
+ i915_vma_parked(i915);
+
+ i915_globals_park();
+}
+
+static void idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gem.idle_work);
+ bool park;
+
+ cancel_delayed_work_sync(&i915->gem.retire_work);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_wakeref_lock(&i915->gt.wakeref);
+ park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
+ intel_wakeref_unlock(&i915->gt.wakeref);
+ if (park)
+ i915_gem_park(i915);
+ else
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gem.retire_work.work);
+
+ /* Come back later if the device is busy... */
+ if (mutex_trylock(&i915->drm.struct_mutex)) {
+ i915_retire_requests(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+static int pm_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct drm_i915_private *i915 =
+ container_of(nb, typeof(*i915), gem.pm_notifier);
+
+ switch (action) {
+ case INTEL_GT_UNPARK:
+ i915_globals_unpark();
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+ break;
+
+ case INTEL_GT_PARK:
+ queue_work(i915->wq, &i915->gem.idle_work);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+{
+ bool result = !i915_terminally_wedged(i915);
+
+ do {
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /* XXX hide warning from gem_eio */
+ if (i915_modparams.reset) {
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
+
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ i915_gem_set_wedged(i915);
+ result = false;
+ }
+ } while (i915_retire_requests(i915) && result);
+
+ GEM_BUG_ON(i915->gt.awake);
+ return result;
+}
+
+bool i915_gem_load_power_context(struct drm_i915_private *i915)
+{
+ return switch_to_kernel_context_sync(i915);
+}
+
+void i915_gem_suspend(struct drm_i915_private *i915)
+{
+ GEM_TRACE("\n");
+
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
+ flush_workqueue(i915->wq);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /*
+ * We have to flush all the executing contexts to main memory so
+ * that they can be saved in the hibernation image. To ensure the last
+ * context image is coherent, we have to switch away from it. That
+ * leaves the i915->kernel_context still active when
+ * we actually suspend, and its image in memory may not match the GPU
+ * state. Fortunately, the kernel_context is disposable and we do
+ * not rely on its state.
+ */
+ switch_to_kernel_context_sync(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ /*
+ * Assert that we successfully flushed all the work and
+ * reset the GPU back to its idle, low power state.
+ */
+ GEM_BUG_ON(i915->gt.awake);
+ flush_work(&i915->gem.idle_work);
+
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
+ i915_gem_drain_freed_objects(i915);
+
+ intel_uc_suspend(i915);
+}
+
+static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
+{
+ return list_first_entry_or_null(list,
+ struct drm_i915_gem_object,
+ mm.link);
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.shrink_list,
+ &i915->mm.purge_list,
+ NULL
+ }, **phase;
+ unsigned long flags;
+
+ /*
+ * Neither the BIOS, ourselves nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ for (phase = phases; *phase; phase++) {
+ LIST_HEAD(keep);
+
+ while ((obj = first_mm_object(*phase))) {
+ list_move_tail(&obj->mm.link, &keep);
+
+ /* Beware the background _i915_gem_free_objects */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+
+ i915_gem_object_lock(obj);
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ }
+
+ list_splice_tail(&keep, *phase);
+ }
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
+}
+
+void i915_gem_resume(struct drm_i915_private *i915)
+{
+ GEM_TRACE("\n");
+
+ WARN_ON(i915->gt.awake);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(i915);
+
+ /*
+ * As we didn't flush the kernel context before suspend, we cannot
+ * guarantee that the context image is complete. So let's just reset
+ * it and start again.
+ */
+ intel_gt_resume(i915);
+
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
+ intel_uc_resume(i915);
+
+ /* Always reload a context for powersaving. */
+ if (!i915_gem_load_power_context(i915))
+ goto err_wedged;
+
+out_unlock:
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return;
+
+err_wedged:
+ if (!i915_reset_failed(i915)) {
+ dev_err(i915->drm.dev,
+ "Failed to re-initialize GPU, declaring it wedged!\n");
+ i915_gem_set_wedged(i915);
+ }
+ goto out_unlock;
+}
+
+void i915_gem_init__pm(struct drm_i915_private *i915)
+{
+ INIT_WORK(&i915->gem.idle_work, idle_work_handler);
+ INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
+
+ i915->gem.pm_notifier.notifier_call = pm_notifier;
+ blocking_notifier_chain_register(&i915->gt.pm_notifications,
+ &i915->gem.pm_notifier);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
new file mode 100644
index 000000000000..6f7d5d11ac3b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_PM_H__
+#define __I915_GEM_PM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct work_struct;
+
+void i915_gem_init__pm(struct drm_i915_private *i915);
+
+bool i915_gem_load_power_context(struct drm_i915_private *i915);
+void i915_gem_resume(struct drm_i915_private *i915);
+
+void i915_gem_suspend(struct drm_i915_private *i915);
+void i915_gem_suspend_late(struct drm_i915_private *i915);
+
+#endif /* __I915_GEM_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
new file mode 100644
index 000000000000..19d9ecdb2894
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -0,0 +1,571 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/pagevec.h>
+#include <linux/swap.h>
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
+static int shmem_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment = i915_sg_segment_size();
+ unsigned int sg_page_sizes;
+ struct pagevec pvec;
+ gfp_t noreclaim;
+ int ret;
+
+ /*
+ * Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache.
+ */
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
+ /*
+ * If there's no chance of allocating enough pages for the whole
+ * object, bail early.
+ */
+ if (page_count > totalram_pages())
+ return -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+rebuild_st:
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /*
+ * Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
+ noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
+ noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ for (i = 0; i < page_count; i++) {
+ const unsigned int shrink[] = {
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
+ 0,
+ }, *s = shrink;
+ gfp_t gfp = noreclaim;
+
+ do {
+ cond_resched();
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (!IS_ERR(page))
+ break;
+
+ if (!*s) {
+ ret = PTR_ERR(page);
+ goto err_sg;
+ }
+
+ i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
+
+ /*
+ * We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ *
+ * However, since graphics tend to be disposable,
+ * defer the oom here by reporting the ENOMEM back
+ * to userspace.
+ */
+ if (!*s) {
+ /* reclaim and warn, but no oom */
+ gfp = mapping_gfp_mask(mapping);
+
+ /*
+ * Our bo are always dirty and so we require
+ * kswapd to reclaim our pages (direct reclaim
+ * does not effectively begin pageout of our
+ * buffers on its own). However, direct reclaim
+ * only waits for kswapd when under allocation
+ * congestion. So as a result __GFP_RECLAIM is
+ * unreliable and fails to actually reclaim our
+ * dirty pages -- unless you try over and over
+ * again with !__GFP_NORETRY. However, we still
+ * want to fail this allocation rather than
+ * trigger the out-of-memory killer and for
+ * this we want __GFP_RETRY_MAYFAIL.
+ */
+ gfp |= __GFP_RETRY_MAYFAIL;
+ }
+ } while (1);
+
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
+ if (i) {
+ sg_page_sizes |= sg->length;
+ sg = sg_next(sg);
+ }
+ st->nents++;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ } else {
+ sg->length += PAGE_SIZE;
+ }
+ last_pfn = page_to_pfn(page);
+
+ /* Check that the i965g/gm workaround works. */
+ WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ }
+ if (sg) { /* loop terminated early; short sg table */
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ }
+
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
+ ret = i915_gem_gtt_prepare_pages(obj, st);
+ if (ret) {
+ /*
+ * DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries, asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&i915->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj, st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_sg:
+ sg_mark_end(sg);
+err_pages:
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+ sg_free_table(st);
+ kfree(st);
+
+ /*
+ * shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC should there be insufficient memory, along with
+ * the usual ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void
+shmem_truncate(struct drm_i915_gem_object *obj)
+{
+ /*
+ * Our goal here is to return as much of the memory as
+ * possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+ obj->mm.madv = __I915_MADV_PURGED;
+ obj->mm.pages = ERR_PTR(-EFAULT);
+}
+
+static void
+shmem_writeback(struct drm_i915_gem_object *obj)
+{
+ struct address_space *mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ unsigned long i;
+
+ /*
+ * Leave mmappings intact (GTT will have been revoked on unbinding,
+ * leaving only CPU mmappings around) and add those pages to the LRU
+ * instead of invoking writeback so they are aged and paged out
+ * as normal.
+ */
+ mapping = obj->base.filp->f_mapping;
+
+ /* Begin writeback on each dirty page */
+ for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ struct page *page;
+
+ page = find_lock_entry(mapping, i);
+ if (!page || xa_is_value(page))
+ continue;
+
+ if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
+ int ret;
+
+ SetPageReclaim(page);
+ ret = mapping->a_ops->writepage(page, &wbc);
+ if (!PageWriteback(page))
+ ClearPageReclaim(page);
+ if (!ret)
+ goto put;
+ }
+ unlock_page(page);
+put:
+ put_page(page);
+ }
+}
+
+void
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush)
+{
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
+
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
+
+ if (needs_clflush &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_sg(pages);
+
+ __start_cpu_write(obj);
+}
+
+static void
+shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
+{
+ struct sgt_iter sgt_iter;
+ struct pagevec pvec;
+ struct page *page;
+
+ __i915_gem_object_release_shmem(obj, pages, true);
+
+ i915_gem_gtt_finish_pages(obj, pages);
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
+
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
+ set_page_dirty(page);
+
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+ obj->mm.dirty = false;
+
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static int
+shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+ u64 remain, offset;
+ unsigned int pg;
+
+ /* Caller already validated user args */
+ GEM_BUG_ON(!access_ok(user_data, arg->size));
+
+ /*
+ * Before we instantiate/pin the backing store for our use, we
+ * can prepopulate the shmemfs filp efficiently using a write into
+ * the pagecache. We avoid the penalty of instantiating all the
+ * pages, which is important if the user is just writing to a few and never
+ * uses the object on the GPU, and using a direct write into shmemfs
+ * allows it to avoid the cost of retrieving a page (either swapin
+ * or clearing-before-use) before it is overwritten.
+ */
+ if (i915_gem_object_has_pages(obj))
+ return -ENODEV;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ /*
+ * Before the pages are instantiated the object is treated as being
+ * in the CPU domain. The pages will be clflushed as required before
+ * use, and we can freely write into the pages directly. If userspace
+ * races pwrite with any other operation, corruption will ensue -
+ * that is userspace's prerogative!
+ */
+
+ remain = arg->size;
+ offset = arg->offset;
+ pg = offset_in_page(offset);
+
+ do {
+ unsigned int len, unwritten;
+ struct page *page;
+ void *data, *vaddr;
+ int err;
+ char c;
+
+ len = PAGE_SIZE - pg;
+ if (len > remain)
+ len = remain;
+
+ /* Prefault the user page to reduce potential recursion */
+ err = __get_user(c, user_data);
+ if (err)
+ return err;
+
+ err = __get_user(c, user_data + len - 1);
+ if (err)
+ return err;
+
+ err = pagecache_write_begin(obj->base.filp, mapping,
+ offset, len, 0,
+ &page, &data);
+ if (err < 0)
+ return err;
+
+ vaddr = kmap_atomic(page);
+ unwritten = __copy_from_user_inatomic(vaddr + pg,
+ user_data,
+ len);
+ kunmap_atomic(vaddr);
+
+ err = pagecache_write_end(obj->base.filp, mapping,
+ offset, len, len - unwritten,
+ page, data);
+ if (err < 0)
+ return err;
+
+ /* We don't handle -EFAULT, leave it to the caller to check */
+ if (unwritten)
+ return -ENODEV;
+
+ remain -= len;
+ user_data += len;
+ offset += len;
+ pg = 0;
+ } while (remain);
+
+ return 0;
+}
+
+const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+
+ .get_pages = shmem_get_pages,
+ .put_pages = shmem_put_pages,
+ .truncate = shmem_truncate,
+ .writeback = shmem_writeback,
+
+ .pwrite = shmem_pwrite,
+};
+
+static int create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ size_t size)
+{
+ unsigned long flags = VM_NORESERVE;
+ struct file *filp;
+
+ drm_gem_private_object_init(&i915->drm, obj, size);
+
+ if (i915->mm.gemfs)
+ filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+ flags);
+ else
+ filp = shmem_file_setup("i915", size, flags);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ obj->filp = filp;
+ return 0;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+{
+ struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
+ unsigned int cache_level;
+ gfp_t mask;
+ int ret;
+
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ ret = create_shmem(i915, &obj->base, size);
+ if (ret)
+ goto fail;
+
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_I965GM(i915) || IS_I965G(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
+ mapping = obj->base.filp->f_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
+
+ i915_gem_object_init(obj, &i915_gem_shmem_ops);
+
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(i915))
+ /* On some devices, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ cache_level = I915_CACHE_LLC;
+ else
+ cache_level = I915_CACHE_NONE;
+
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+
+fail:
+ i915_gem_object_free(obj);
+ return ERR_PTR(ret);
+}
+
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ struct file *file;
+ size_t offset;
+ int err;
+
+ obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
+ return obj;
+
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
+
+ file = obj->base.filp;
+ offset = 0;
+ do {
+ unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
+ struct page *page;
+ void *pgdata, *vaddr;
+
+ err = pagecache_write_begin(file, file->f_mapping,
+ offset, len, 0,
+ &page, &pgdata);
+ if (err < 0)
+ goto fail;
+
+ vaddr = kmap(page);
+ memcpy(vaddr, data, len);
+ kunmap(page);
+
+ err = pagecache_write_end(file, file->f_mapping,
+ offset, len, len,
+ page, pgdata);
+ if (err < 0)
+ goto fail;
+
+ size -= len;
+ data += len;
+ offset += len;
+ } while (size);
+
+ return obj;
+
+fail:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
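
The shmem_pwrite() fast path above only applies while the object's backing
pages are not yet instantiated. A minimal userspace sketch of driving it
through the pwrite ioctl; only the uapi struct and ioctl number are taken as
given, the helper and its error handling are illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Upload into a freshly created, never-used BO so the kernel can take the
 * pagecache fast path instead of instantiating all the backing pages. */
static int bo_pwrite(int drm_fd, uint32_t handle,
                     const void *data, uint64_t len)
{
	struct drm_i915_gem_pwrite arg = {
		.handle = handle,
		.offset = 0,
		.size = len,
		.data_ptr = (uintptr_t)data,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
}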
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 6da795c7e62e..3a926a8755c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2008-2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008-2015 Intel Corporation
*/
#include <linux/oom.h>
@@ -32,7 +14,6 @@
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>
-#include "i915_drv.h"
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *i915,
@@ -88,7 +69,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
/* If any vma are "permanently" pinned, it will prevent us from
@@ -114,12 +95,26 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
return !i915_gem_object_has_pages(obj);
}
+static void try_to_writeback(struct drm_i915_gem_object *obj,
+ unsigned int flags)
+{
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ i915_gem_object_truncate(obj);
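+ /* fall through: truncation marks the object __I915_MADV_PURGED */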
+ case __I915_MADV_PURGED:
+ return;
+ }
+
+ if (flags & I915_SHRINK_WRITEBACK)
+ i915_gem_object_writeback(obj);
+}
+
/**
* i915_gem_shrink - Shrink buffer object caches
* @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
- * @flags: control flags for selecting cache types
+ * @shrink: control flags for selecting cache types
*
* This function is the main interface to the shrinker. It will try to release
* up to @target pages of main memory backing storage from buffer objects.
@@ -143,14 +138,17 @@ unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
- unsigned flags)
+ unsigned int shrink)
{
const struct {
struct list_head *list;
unsigned int bit;
} phases[] = {
- { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &i915->mm.bound_list, I915_SHRINK_BOUND },
+ { &i915->mm.purge_list, ~0u },
+ {
+ &i915->mm.shrink_list,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
+ },
{ NULL, 0 },
}, *phase;
intel_wakeref_t wakeref = 0;
@@ -158,24 +156,19 @@ i915_gem_shrink(struct drm_i915_private *i915,
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(i915, flags, &unlock))
+ if (!shrinker_lock(i915, shrink, &unlock))
return 0;
/*
- * When shrinking the active list, also consider active contexts.
- * Active contexts are pinned until they are retired, and so can
- * not be simply unbound to retire and unpin their pages. To shrink
- * the contexts, we must wait until the gpu is idle.
- *
- * We don't care about errors here; if we cannot wait upon the GPU,
- * we will free as much as we can and hope to get a second chance.
+ * When shrinking the active list, we should also consider active
+ * contexts. Active contexts are pinned until they are retired, and
+ * so can not be simply unbound to retire and unpin their pages. To
+ * shrink the contexts, we must wait until the gpu is idle and
+ * completed its switch to the kernel context. In short, we do
+ * not have a good mechanism for idling a specific context.
*/
- if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- trace_i915_gem_shrink(i915, target, flags);
+ trace_i915_gem_shrink(i915, target, shrink);
i915_retire_requests(i915);
/*
@@ -183,10 +176,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
- if (flags & I915_SHRINK_BOUND) {
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (shrink & I915_SHRINK_BOUND) {
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
if (!wakeref)
- flags &= ~I915_SHRINK_BOUND;
+ shrink &= ~I915_SHRINK_BOUND;
}
/*
@@ -211,8 +204,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
struct drm_i915_gem_object *obj;
+ unsigned long flags;
- if ((flags & phase->bit) == 0)
+ if ((shrink & phase->bit) == 0)
continue;
INIT_LIST_HEAD(&still_in_list);
@@ -224,51 +218,56 @@ i915_gem_shrink(struct drm_i915_private *i915,
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
mm.link))) {
list_move_tail(&obj->mm.link, &still_in_list);
- if (flags & I915_SHRINK_PURGEABLE &&
- obj->mm.madv != I915_MADV_DONTNEED)
- continue;
-
- if (flags & I915_SHRINK_VMAPS &&
+ if (shrink & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mm.mapping))
continue;
- if (!(flags & I915_SHRINK_ACTIVE) &&
+ if (!(shrink & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
i915_gem_object_is_framebuffer(obj)))
continue;
+ if (!(shrink & I915_SHRINK_BOUND) &&
+ atomic_read(&obj->bind_count))
+ continue;
+
if (!can_release_pages(obj))
continue;
- spin_unlock(&i915->mm.obj_lock);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
if (!i915_gem_object_has_pages(obj)) {
- __i915_gem_object_invalidate(obj);
+ try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
}
+
scanned += obj->base.size >> PAGE_SHIFT;
+ i915_gem_object_put(obj);
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
}
list_splice_tail(&still_in_list, phase->list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
- if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915, wakeref);
+ if (shrink & I915_SHRINK_BOUND)
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_retire_requests(i915);
@@ -298,7 +297,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
unsigned long freed = 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
@@ -313,25 +312,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_i915_gem_object *obj;
- unsigned long num_objects = 0;
- unsigned long count = 0;
+ unsigned long num_objects;
+ unsigned long count;
- spin_lock(&i915->mm.obj_lock);
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
+ count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
+ num_objects = READ_ONCE(i915->mm.shrink_count);
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
- spin_unlock(&i915->mm.obj_lock);
-
- /* Update our preferred vmscan batch size for the next pass.
+ /*
+ * Update our preferred vmscan batch size for the next pass.
* Our rough guess for an effective batch size is 2
* available GEM objects worth of pages. That is, we don't want
* the shrinker to fire until it is worth the cost of freeing an
@@ -366,23 +354,18 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (sc->nr_scanned < sc->nr_to_scan)
- freed += i915_gem_shrink(i915,
- sc->nr_to_scan - sc->nr_scanned,
- &sc->nr_scanned,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_WRITEBACK);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_WRITEBACK);
}
}
@@ -397,39 +380,35 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
- unsigned long unevictable, bound, unbound, freed_pages;
+ unsigned long unevictable, available, freed_pages;
intel_wakeref_t wakeref;
+ unsigned long flags;
freed_pages = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_WRITEBACK);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
- unbound = bound = unevictable = 0;
- spin_lock(&i915->mm.obj_lock);
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
- if (!can_release_pages(obj))
- unevictable += obj->base.size >> PAGE_SHIFT;
- else
- unbound += obj->base.size >> PAGE_SHIFT;
- }
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
+ available = unevictable = 0;
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
- bound += obj->base.size >> PAGE_SHIFT;
+ available += obj->base.size >> PAGE_SHIFT;
}
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (freed_pages || unbound || bound)
+ if (freed_pages || available)
pr_info("Purging GPU memory, %lu pages freed, "
- "%lu pages still pinned.\n",
- freed_pages, unevictable);
+ "%lu pages still pinned, %lu pages left available.\n",
+ freed_pages, unevictable, available);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
@@ -454,7 +433,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
MAX_SCHEDULE_TIMEOUT))
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
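
For orientation before the next file: the count/scan pair patched above is
driven by the core mm through a registered struct shrinker. A hedged sketch
of that wiring (the names below are illustrative; i915 does the equivalent
in its shrinker registration path):

static struct shrinker i915_shrinker_sketch = {
	.count_objects = i915_gem_shrinker_count,
	.scan_objects = i915_gem_shrinker_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 4096,
};

static int sketch_register(void)
{
	/* Registered once at driver load; unregister_shrinker() on unload. */
	return register_shrinker(&i915_shrinker_sketch);
}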
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0a8082cfc761..de1fab2058ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,32 +1,15 @@
/*
- * Copyright © 2008-2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Chris Wilson <chris@chris-wilson.co.uk>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008-2012 Intel Corporation
*/
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
/*
@@ -342,11 +325,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void icl_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *base,
resource_size_t *size)
{
- u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+ u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
@@ -706,10 +689,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
+ atomic_inc(&obj->bind_count);
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
new file mode 100644
index 000000000000..adb3074d9ce2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/jiffies.h>
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+/*
+ * 20ms is a fairly arbitrary limit (greater than the average frame time)
+ * chosen to prevent the CPU getting more than a frame ahead of the GPU
+ * (when using lax throttling for the frontbuffer). We also use it to
+ * offer free GPU waitboosts for severely congested workloads.
+ */
+#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
+
+/*
+ * Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
+ struct i915_request *request, *target = NULL;
+ long ret;
+
+ /* ABI: return -EIO if already wedged */
+ ret = i915_terminally_wedged(to_i915(dev));
+ if (ret)
+ return ret;
+
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ if (target) {
+ list_del(&target->client_link);
+ target->file_priv = NULL;
+ }
+
+ target = request;
+ }
+ if (target)
+ i915_request_get(target);
+ spin_unlock(&file_priv->mm.lock);
+
+ if (!target)
+ return 0;
+
+ ret = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(target);
+
+ return ret < 0 ? ret : 0;
+}
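
Since DRM_IOCTL_I915_GEM_THROTTLE carries no payload, userspace usage is a
bare ioctl once per frame. A hedged sketch (the drm_fd plumbing is assumed):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Keep the CPU at most ~20ms of requests ahead of the GPU; returns -1
 * with errno set (e.g. EIO once the GPU is terminally wedged). */
static int frame_throttle(int drm_fd)
{
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}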
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a9b5329dae3b..ca0c2f451742 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,34 +1,17 @@
/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008 Intel Corporation
*/
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
/**
* DOC: buffer object tiling
@@ -296,7 +279,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
- i915_gem_release_mmap(obj);
+ i915_gem_object_release_mmap(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8079ea3af103..528b61678334 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,37 +1,23 @@
/*
- * Copyright © 2012-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2012-2014 Intel Corporation
*/
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
+#include <drm/i915_drm.h>
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_i915_private *i915;
@@ -782,14 +768,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- ppgtt = dev_priv->kernel_context->ppgtt;
- if (!ppgtt || !ppgtt->vm.has_read_only)
+ vm = dev_priv->kernel_context->vm;
+ if (!vm || !vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
new file mode 100644
index 000000000000..26ec6579b7cd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -0,0 +1,278 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include <linux/dma-fence-array.h>
+#include <linux/jiffies.h>
+
+#include "gt/intel_engine.h"
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout)
+{
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
+
+ if (dma_fence_is_i915(fence))
+ return i915_request_wait(to_request(fence), flags, timeout);
+
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
+}
+
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout)
+{
+ unsigned int seq = __read_seqcount_begin(&resv->seq);
+ struct dma_fence *excl;
+ bool prune_fences = false;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout);
+ if (timeout < 0)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+
+ /*
+ * If both shared fences and an exclusive fence exist,
+ * then by construction the shared fences must be later
+ * than the exclusive fence. If we successfully wait for
+ * all the shared fences, we know that the exclusive fence
+ * must also be signaled. If all the shared fences are
+ * signaled, we can prune the array and recover the
+ * floating references on the fences/requests.
+ */
+ prune_fences = count && timeout >= 0;
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
+ }
+
+ if (excl && timeout >= 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout);
+
+ dma_fence_put(excl);
+
+ /*
+ * Opportunistically prune the fences iff we know they have *all* been
+ * signaled and that the reservation object has not been changed (i.e.
+ * no new fences have been added).
+ */
+ if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+ if (reservation_object_trylock(resv)) {
+ if (!__read_seqcount_retry(&resv->seq, seq))
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
+ }
+ }
+
+ return timeout;
+}
+
+static void __fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ struct i915_request *rq;
+ struct intel_engine_cs *engine;
+
+ if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+
+ local_bh_disable();
+ rcu_read_lock(); /* RCU serialisation for set-wedged protection */
+ if (engine->schedule)
+ engine->schedule(rq, attr);
+ rcu_read_unlock();
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
+}
+
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], attr);
+ } else {
+ __fence_set_priority(fence, attr);
+ }
+}
+
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ const struct i915_sched_attr *attr)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], attr);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
+ }
+
+ if (excl) {
+ fence_set_priority(excl, attr);
+ dma_fence_put(excl);
+ }
+ return 0;
+}
+
+/**
+ * i915_gem_object_wait - Waits for rendering to the object to be completed
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes etc)
+ * @timeout: how long to wait
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout)
+{
+ might_sleep();
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->base.resv,
+ flags, timeout);
+ return timeout < 0 ? timeout : 0;
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ /* nsecs_to_jiffies64() does not guard against overflow */
+ if (NSEC_PER_SEC % HZ &&
+ div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+ return MAX_JIFFY_OFFSET;
+
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
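+
+/*
+ * Worked example (illustrative): with HZ=300, NSEC_PER_SEC % HZ != 0 and
+ * the overflow guard is live. For n = 5 * NSEC_PER_SEC, div_u64(n,
+ * NSEC_PER_SEC) == 5 is far below MAX_JIFFY_OFFSET / HZ, so no clamp
+ * applies and the result is nsecs_to_jiffies64(n) + 1 == 1501 jiffies;
+ * the +1 rounds up so we never wait for less than the caller requested.
+ */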
+
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: incomplete, restart syscall
+ * -ENOMEM: damn
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the
+ * busy ioctl.
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ ktime_t start;
+ long ret;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ obj = i915_gem_object_lookup(file, args->bo_handle);
+ if (!obj)
+ return -ENOENT;
+
+ start = ktime_get();
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns));
+
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+ args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
+ }
+
+ i915_gem_object_put(obj);
+ return ret;
+}
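
A hedged userspace sketch of the semantics documented above; only the uapi
struct and ioctl number are taken as given, the helper is illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wait up to 100ms for the BO to go idle. On success the kernel writes
 * the unused budget back into timeout_ns; a timeout shows up as -1 with
 * errno == ETIME, per the ioctl documentation above. */
static int bo_wait_idle(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_wait args = {
		.bo_handle = handle,
		.flags = 0,
		.timeout_ns = 100 * 1000 * 1000,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &args);
}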
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 888b7d3f04c3..099f3397aada 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/fs.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
new file mode 100644
index 000000000000..2a1e59af3e4a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017 Intel Corporation
+ */
+
+#ifndef __I915_GEMFS_H__
+#define __I915_GEMFS_H__
+
+struct drm_i915_private;
+
+int i915_gemfs_init(struct drm_i915_private *i915);
+
+void i915_gemfs_fini(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 419fd4d6a8f0..3c5d17b2b670 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -1,27 +1,11 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
+#include "i915_scatterlist.h"
+
#include "huge_gem_object.h"
static void huge_free_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
new file mode 100644
index 000000000000..549c1394bcdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __HUGE_GEM_OBJECT_H
+#define __HUGE_GEM_OBJECT_H
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size);
+
+static inline phys_addr_t
+huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
+{
+ return obj->scratch;
+}
+
+static inline dma_addr_t
+huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
+{
+ return obj->base.size;
+}
+
+#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 90721b54e7ae..b74729b6f353 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1,33 +1,21 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
-#include "../i915_selftest.h"
-
#include <linux/prime_numbers.h>
-#include "mock_drm.h"
-#include "i915_random.h"
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_pm.h"
+
+#include "igt_gem_utils.h"
+#include "mock_context.h"
+
+#include "selftests/mock_drm.h"
+#include "selftests/mock_gem_device.h"
+#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
@@ -380,7 +368,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
@@ -459,7 +447,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
@@ -587,7 +575,7 @@ out_put:
}
static void close_object_list(struct list_head *objects,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
struct drm_i915_gem_object *obj, *on;
@@ -607,7 +595,7 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
@@ -728,7 +716,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
@@ -972,27 +960,27 @@ static int gpu_write(struct i915_vma *vma,
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
-
batch = gpu_write_dw(vma, dword * sizeof(u32), value);
if (IS_ERR(batch))
return PTR_ERR(batch);
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto err_request;
- i915_gem_object_set_active_reference(batch->obj);
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_lock(vma);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto err_request;
@@ -1006,6 +994,7 @@ err_request:
err_batch:
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
return err;
}
@@ -1016,7 +1005,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
unsigned long n;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
return err;
@@ -1037,7 +1026,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
kunmap_atomic(ptr);
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return err;
}
@@ -1049,8 +1038,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1103,8 +1091,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1389,7 +1376,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
unsigned int size = sizes[i];
- obj = i915_gem_object_create(i915, size);
+ obj = i915_gem_object_create_shmem(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1430,7 +1417,7 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
@@ -1445,7 +1432,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
+ if (!vm || !i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1460,7 +1447,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1514,7 +1501,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1552,8 +1539,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1567,7 +1553,7 @@ static int igt_tmpfs_fallback(void *arg)
i915->mm.gemfs = NULL;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_restore;
@@ -1610,8 +1596,7 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1627,7 +1612,7 @@ static int igt_shrink_thp(void *arg)
return 0;
}
- obj = i915_gem_object_create(i915, SZ_2M);
+ obj = i915_gem_object_create_shmem(i915, SZ_2M);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1698,7 +1683,7 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_ppgtt_64K),
};
struct drm_i915_private *dev_priv;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
dev_priv = mock_gem_device();
@@ -1732,7 +1717,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1769,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1777,13 +1762,13 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
goto out_unlock;
}
- if (ctx->ppgtt)
- ctx->ppgtt->vm.scrub_64K = true;
+ if (ctx->vm)
+ ctx->vm->scrub_64K = true;
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
new file mode 100644
index 000000000000..f3a5eb807c1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_client_fill(void *arg)
+{
+ struct intel_context *ce = arg;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_gem_object *obj;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+
+ pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+ obj = i915_gem_object_create_internal(i915, sz);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put;
+ }
+
+ /*
+ * XXX: The goal is to move this to get_pages, so try to dirty the
+ * CPU cache first to check that we do the required clflush
+ * before scheduling the blt for !llc platforms. This matches
+ * some version of reality where at get_pages the pages
+ * themselves may not yet be coherent with the GPU(swap-in). If
+ * we are missing the flush then we should see the stale cache
+ * values after we do the set_to_cpu_domain and pick it up as a
+ * test failure.
+ */
+ memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ obj->cache_dirty = true;
+
+ err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
+ &obj->mm.page_sizes,
+ val);
+ if (err)
+ goto err_unpin;
+
+ /*
+ * XXX: For now do the wait without the object resv lock to
+ * ensure we don't deadlock.
+ */
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+err_flush:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_client_fill),
+ };
+
+ if (i915_terminally_wedged(i915))
+ return 0;
+
+ if (!HAS_ENGINE(i915, BCS0))
+ return 0;
+
+ return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index e43630b40fce..8f22d3f18422 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -1,31 +1,13 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
-#include "i915_random.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
static int cpu_set(struct drm_i915_gem_object *obj,
unsigned long offset,
@@ -37,7 +19,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(obj, &needs_clflush);
if (err)
return err;
@@ -54,7 +36,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return 0;
}
@@ -69,7 +51,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(obj, &needs_clflush);
if (err)
return err;
@@ -83,7 +65,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return 0;
}
@@ -96,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj,
u32 __iomem *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -123,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj,
u32 __iomem *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -149,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj,
u32 *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -170,7 +158,9 @@ static int wc_get(struct drm_i915_gem_object *obj,
u32 *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -194,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
u32 *cs;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -202,7 +194,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -233,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
}
intel_ring_advance(rq, cs);
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
i915_vma_unpin(vma);
i915_request_add(rq);
@@ -299,7 +293,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -371,19 +365,19 @@ static int igt_gem_coherency(void *arg)
}
}
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
}
}
}
}
unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
put_object:
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
goto unlock;
}
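
A recurring conversion in this patch: explicit domain transitions are now bracketed by the object's own lock rather than performed bare. The pattern, repeated around every set_to_*_domain() call below, is:

    /* Object-lock pattern applied throughout this series: */
    i915_gem_object_lock(obj);
    err = i915_gem_object_set_to_gtt_domain(obj, true);
    i915_gem_object_unlock(obj);
    if (err)
    	return err;
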
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 4e1b6efc6b22..eaa2b16574c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1,40 +1,26 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "../i915_reset.h"
-#include "../i915_selftest.h"
-#include "i915_random.h"
-#include "igt_flush_test.h"
-#include "igt_live_test.h"
-#include "igt_reset.h"
-#include "igt_spinner.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_reset.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/mock_drm.h"
+#include "selftests/mock_gem_device.h"
-#include "mock_drm.h"
-#include "mock_gem_device.h"
#include "huge_gem_object.h"
+#include "igt_gem_utils.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -67,7 +53,7 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -90,16 +76,14 @@ static int live_nop_switch(void *arg)
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
- rq = i915_request_alloc(engine, ctx[n]);
+ rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
}
i915_request_add(rq);
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
i915_gem_set_wedged(i915);
err = -EIO;
@@ -120,7 +104,7 @@ static int live_nop_switch(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- rq = i915_request_alloc(engine, ctx[n % nctx]);
+ rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
@@ -142,9 +126,7 @@ static int live_nop_switch(void *arg)
i915_request_add(rq);
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
i915_gem_set_wedged(i915);
@@ -170,7 +152,7 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -223,10 +205,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -260,8 +238,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
unsigned int dw)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -275,7 +252,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -300,7 +279,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
@@ -316,22 +295,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(batch->obj);
+ i915_request_add(rq);
+
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
i915_vma_unpin(vma);
- i915_request_add(rq);
-
return 0;
skip_request:
@@ -352,7 +335,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
unsigned int n, m, need_flush;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+ err = i915_gem_object_prepare_write(obj, &need_flush);
if (err)
return err;
@@ -367,7 +350,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
kunmap_atomic(map);
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
obj->write_domain = 0;
return 0;
@@ -379,7 +362,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int n, m, needs_flush;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
return err;
@@ -417,7 +400,7 @@ out_unmap:
break;
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return err;
}
@@ -444,8 +427,7 @@ create_test_object(struct i915_gem_context *ctx,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -541,13 +523,13 @@ static int igt_ctx_exec(void *arg)
}
}
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
goto out_unlock;
}
@@ -618,7 +600,7 @@ static int igt_shared_ctx_exec(void *arg)
goto out_unlock;
}
- if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
}
@@ -649,7 +631,7 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
- __assign_ppgtt(ctx, parent->ppgtt);
+ __assign_ppgtt(ctx, parent->vm);
if (!obj) {
obj = create_test_object(parent, file, &objects);
@@ -661,13 +643,13 @@ static int igt_shared_ctx_exec(void *arg)
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
kernel_context_close(ctx);
goto out_test;
}
@@ -754,8 +736,7 @@ err:
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct i915_request **rq_out)
{
struct i915_request *rq;
@@ -763,13 +744,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -783,27 +766,33 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_request_alloc(engine, ctx);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
- err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0);
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
if (err)
goto err_request;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
i915_vma_unpin(vma);
@@ -819,6 +808,7 @@ err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
+ i915_vma_put(batch);
err_vma:
i915_vma_unpin(vma);
@@ -833,8 +823,7 @@ static int
__sseu_prepare(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct igt_spinner **spin)
{
struct i915_request *rq;
@@ -852,7 +841,10 @@ __sseu_prepare(struct drm_i915_private *i915,
if (ret)
goto err_free;
- rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(*spin,
+ ce->gem_context,
+ ce->engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -879,8 +871,7 @@ err_free:
static int
__read_slice_count(struct drm_i915_private *i915,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -891,14 +882,14 @@ __read_slice_count(struct drm_i915_private *i915,
u32 *buf, val;
long ret;
- ret = emit_rpcs_query(obj, ctx, engine, &rq);
+ ret = emit_rpcs_query(obj, ce, &rq);
if (ret)
return ret;
if (spin)
igt_spinner_end(spin);
- ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+ ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
if (ret < 0)
return ret;
@@ -955,31 +946,29 @@ static int
__sseu_finish(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct i915_gem_context *kctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
unsigned int expected,
struct igt_spinner *spin)
{
- unsigned int slices =
- hweight32(intel_device_default_sseu(i915).slice_mask);
+ unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
u32 rpcs = 0;
int ret = 0;
if (flags & TEST_RESET) {
- ret = i915_reset_engine(engine, "sseu");
+ ret = i915_reset_engine(ce->engine, "sseu");
if (ret)
goto out;
}
- ret = __read_slice_count(i915, ctx, engine, obj,
+ ret = __read_slice_count(i915, ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;
- ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs);
+ ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
+ NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out:
@@ -987,13 +976,11 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
- ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs);
+ ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -1005,28 +992,22 @@ static int
__sseu_test(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct intel_sseu sseu)
{
struct igt_spinner *spin = NULL;
- struct i915_gem_context *kctx;
int ret;
- kctx = kernel_context(i915);
- if (IS_ERR(kctx))
- return PTR_ERR(kctx);
-
- ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
+ ret = __sseu_prepare(i915, name, flags, ce, &spin);
if (ret)
- goto out_context;
+ return ret;
- ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = __intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
- ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
+ ret = __sseu_finish(i915, name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);
out_spin:
@@ -1035,10 +1016,6 @@ out_spin:
igt_spinner_fini(spin);
kfree(spin);
}
-
-out_context:
- kernel_context_close(kctx);
-
return ret;
}
@@ -1047,10 +1024,11 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_sseu default_sseu = intel_device_default_sseu(i915);
struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_sseu default_sseu = engine->sseu;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct intel_sseu pg_sseu;
intel_wakeref_t wakeref;
struct drm_file *file;
@@ -1100,25 +1078,35 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ce = i915_gem_context_get_engine(ctx, RCS0);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_rpm;
+ }
+
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_context;
/* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
if (ret)
goto out_fail;
/* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
/* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
if (ret)
goto out_fail;
/* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
@@ -1126,10 +1114,13 @@ out_fail:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
ret = -EIO;
+ intel_context_unpin(ce);
+out_context:
+ intel_context_put(ce);
+out_rpm:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_put(obj);
- intel_runtime_pm_put(i915, wakeref);
-
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
@@ -1171,8 +1162,8 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
@@ -1203,8 +1194,8 @@ static int igt_ctx_readonly(void *arg)
goto out_unlock;
}
- ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
- if (!ppgtt || !ppgtt->vm.has_read_only) {
+ vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+ if (!vm || !vm->has_read_only) {
err = 0;
goto out_unlock;
}
@@ -1233,13 +1224,13 @@ static int igt_ctx_readonly(void *arg)
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
goto out_unlock;
}
@@ -1283,7 +1274,7 @@ out_unlock:
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+ __drm_mm_interval_first(&ctx->vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1331,7 +1322,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1345,7 +1336,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto err_unpin;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1355,13 +1346,15 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto err_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(obj);
i915_vma_unpin(vma);
i915_vma_close(vma);
+ i915_vma_put(vma);
i915_request_add(rq);
@@ -1426,7 +1419,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1440,7 +1433,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto err_unpin;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1450,7 +1443,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto err_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1459,7 +1454,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_request_add(rq);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
goto err;
@@ -1531,14 +1528,14 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_a->ppgtt == ctx_b->ppgtt)
+ if (ctx_a->vm == ctx_b->vm)
goto out_unlock;
- vm_total = ctx_a->ppgtt->vm.total;
- GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+ vm_total = ctx_a->vm->total;
+ GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -1583,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1608,111 +1605,9 @@ __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
return "none";
}
-static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx,
- intel_engine_mask_t engines)
+static bool skip_unused_engines(struct intel_context *ce, void *data)
{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- int pass;
-
- GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
- bool from_idle = pass & 1;
- int err;
-
- if (!from_idle) {
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- i915_request_add(rq);
- }
- }
-
- err = i915_gem_switch_to_kernel_context(i915,
- i915->gt.active_engines);
- if (err)
- return err;
-
- if (!from_idle) {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
- }
-
- if (i915->gt.active_requests) {
- pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
- i915->gt.active_requests,
- pass, from_idle ? "idle" : "busy",
- __engine_name(i915, engines),
- is_power_of_2(engines) ? "" : "s");
- return -EINVAL;
- }
-
- /* XXX Bonus points for proving we are the kernel context! */
-
- mutex_unlock(&i915->drm.struct_mutex);
- drain_delayed_work(&i915->gt.idle_work);
- mutex_lock(&i915->drm.struct_mutex);
- }
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- return -EIO;
-
- return 0;
-}
-
-static int igt_switch_to_kernel_context(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err;
-
- /*
- * A core premise of switching to the kernel context is that
- * if an engine is already idling in the kernel context, we
- * do not emit another request and wake it up. The other being
- * that we do indeed end up idling in the kernel context.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx)) {
- mutex_unlock(&i915->drm.struct_mutex);
- return PTR_ERR(ctx);
- }
-
- /* First check idling each individual engine */
- for_each_engine(engine, i915, id) {
- err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
- if (err)
- goto out_unlock;
- }
-
- /* Now en masse */
- err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
- if (err)
- goto out_unlock;
-
-out_unlock:
- GEM_TRACE_DUMP_ON(err);
-
- intel_runtime_pm_put(i915, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kernel_context_close(ctx);
- return err;
+ return !ce->state;
}
static void mock_barrier_task(void *data)
@@ -1729,7 +1624,6 @@ static int mock_context_barrier(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
struct i915_request *rq;
- intel_wakeref_t wakeref;
unsigned int counter;
int err;
@@ -1748,7 +1642,7 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, 0,
- NULL, mock_barrier_task, &counter);
+ NULL, NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1761,31 +1655,31 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ skip_unused_engines,
+ NULL,
+ mock_barrier_task,
+ &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
}
if (counter == 0) {
- pr_err("Did not retire immediately for all inactive engines\n");
+ pr_err("Did not retire immediately for all unused engines\n");
err = -EINVAL;
goto out;
}
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(i915, wakeref)
- rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ rq = igt_request_alloc(ctx, i915->engine[RCS0]);
if (IS_ERR(rq)) {
pr_err("Request allocation failed!\n");
goto out;
}
i915_request_add(rq);
- GEM_BUG_ON(list_empty(&ctx->active_engines));
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ NULL, NULL, mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;
@@ -1800,7 +1694,10 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ skip_unused_engines,
+ NULL,
+ mock_barrier_task,
+ &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1824,7 +1721,6 @@ unlock:
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_switch_to_kernel_context),
SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
@@ -1843,7 +1739,6 @@ int i915_gem_context_mock_selftests(void)
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_switch_to_kernel_context),
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 2b943ee246c9..e3a64edef918 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -1,31 +1,14 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
-#include "mock_gem_device.h"
#include "mock_dmabuf.h"
+#include "selftests/mock_gem_device.h"
static int igt_dmabuf_export(void *arg)
{
@@ -33,7 +16,7 @@ static int igt_dmabuf_export(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -57,7 +40,7 @@ static int igt_dmabuf_import_self(void *arg)
struct dma_buf *dmabuf;
int err;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -232,7 +215,7 @@ static int igt_dmabuf_export_vmap(void *arg)
void *ptr;
int err;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -279,7 +262,7 @@ static int igt_dmabuf_export_kmap(void *arg)
void *ptr;
int err;
- obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 971148fbe6f5..5c81f4b4813a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1,145 +1,15 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include <linux/prime_numbers.h>
-#include "mock_gem_device.h"
+#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
-
-static int igt_gem_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
-
- /* Basic test to ensure we can create an object */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- err = 0;
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_phys_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err;
-
- /* Create an object and bind it to a contiguous set of physical pages,
- * i.e. exercise the i915_gem_object_phys API.
- */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
- goto out_obj;
- }
-
- if (obj->ops != &i915_gem_phys_ops) {
- pr_err("i915_gem_object_attach_phys did not create a phys object\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- if (!atomic_read(&obj->mm.pages_pin_count)) {
- pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- /* Make the object dirty so that put_pages must do copy back the data */
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
- err);
- goto out_obj;
- }
-
-out_obj:
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_gem_huge(void *arg)
-{
- const unsigned int nreal = 509; /* just to be awkward */
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- unsigned int n;
- int err;
-
- /* Basic sanitycheck of our huge fake object allocation */
-
- obj = huge_gem_object(i915,
- nreal * PAGE_SIZE,
- i915->ggtt.vm.total + PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
- nreal, obj->base.size / PAGE_SIZE, err);
- goto out;
- }
-
- for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
- if (i915_gem_object_get_page(obj, n) !=
- i915_gem_object_get_page(obj, n % nreal)) {
- pr_err("Page lookup mismatch at index %u [%u]\n",
- n, n % nreal);
- err = -EINVAL;
- goto out_unpin;
- }
- }
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
struct tile {
unsigned int width;
@@ -228,6 +98,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
for_each_prime_number_from(page, 1, npages) {
struct i915_ggtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
@@ -240,13 +118,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(view.partial.size > nreal);
cond_resched();
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err) {
- pr_err("Failed to flush to GTT write domain; err=%d\n",
- err);
- return err;
- }
-
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
@@ -265,14 +136,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return PTR_ERR(io);
}
- iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
i915_vma_unpin_iomap(vma);
offset = tiled_offset(tile, page << PAGE_SHIFT);
if (offset >= obj->base.size)
continue;
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -334,7 +205,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
IGT_TIMEOUT(end);
@@ -445,7 +316,7 @@ next_tiling: ;
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -468,18 +339,20 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
}
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
i915_request_add(rq);
- __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
+ i915_gem_object_put(obj); /* leave it only alive via its active ref */
return err;
}
@@ -495,7 +368,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
i915_gem_object_put(obj);
return err == expected;
@@ -505,17 +378,21 @@ static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_shrinker_unregister(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (!i915->gt.active_requests++) {
- intel_wakeref_t wakeref;
+ intel_gt_pm_get(i915);
- with_intel_runtime_pm(i915, wakeref)
- i915_gem_unpark(i915);
- }
+ cancel_delayed_work_sync(&i915->gem.retire_work);
+ flush_work(&i915->gem.idle_work);
+}
+
+static void restore_retire_worker(struct drm_i915_private *i915)
+{
+ intel_gt_pm_put(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- cancel_delayed_work_sync(&i915->gt.retire_work);
- cancel_delayed_work_sync(&i915->gt.idle_work);
+ i915_gem_shrinker_register(i915);
}
static int igt_mmap_offset_exhaustion(void *arg)
@@ -552,7 +429,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
/* Too large */
- if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
@@ -565,7 +442,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
if (err) {
pr_err("Unable to insert object into reclaimed hole\n");
goto err_obj;
@@ -581,8 +458,6 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- intel_wakeref_t wakeref;
-
if (i915_terminally_wedged(i915))
break;
@@ -592,10 +467,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = 0;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
- err = make_obj_busy(obj);
+ err = make_obj_busy(obj);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
@@ -604,9 +477,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* NB we rely on the _active_ reference to access obj now */
GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
if (err) {
- pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
+ pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
loop, err);
goto out;
}
@@ -615,42 +488,16 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
drm_mm_remove_node(&resv);
out_park:
- mutex_lock(&i915->drm.struct_mutex);
- if (--i915->gt.active_requests)
- queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
- else
- queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
- mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_shrinker_register(i915);
+ restore_retire_worker(i915);
return err;
err_obj:
i915_gem_object_put(obj);
goto out;
}
-int i915_gem_object_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_object),
- SUBTEST(igt_phys_object),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- drm_dev_put(&i915->drm);
- return err;
-}
-
-int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_huge),
SUBTEST(igt_partial_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..2b6db6f799de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "huge_gem_object.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_gem_device.h"
+
+static int igt_gem_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = -ENOMEM;
+
+ /* Basic test to ensure we can create an object */
+
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ err = 0;
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_gem_huge(void *arg)
+{
+ const unsigned int nreal = 509; /* just to be awkward */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned int n;
+ int err;
+
+ /* Basic sanity check of our huge fake object allocation */
+
+ obj = huge_gem_object(i915,
+ nreal * PAGE_SIZE,
+ i915->ggtt.vm.total + PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
+ if (i915_gem_object_get_page(obj, n) !=
+ i915_gem_object_get_page(obj, n % nreal)) {
+ pr_err("Page lookup mismatch at index %u [%u]\n",
+ n, n % nreal);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_huge),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
new file mode 100644
index 000000000000..e23d8c9e9298
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_fill_blt(void *arg)
+{
+ struct intel_context *ce = arg;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_gem_object *obj;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+
+ pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+ obj = i915_gem_object_create_internal(i915, sz);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put;
+ }
+
+ /*
+ * Make sure the potentially async clflush does its job, if
+ * required.
+ */
+ memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ obj->cache_dirty = true;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_fill_blt(obj, ce, val);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+err_flush:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_fill_blt),
+ };
+
+ if (i915_terminally_wedged(i915))
+ return 0;
+
+ if (!HAS_ENGINE(i915, BCS0))
+ return 0;
+
+ return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
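
Before invoking the blitter, igt_fill_blt() above primes the buffer with val ^ 0xdeadbeaf, so any dword the fill fails to overwrite can never equal the expected value. A standalone sketch of why the XOR priming guarantees a mismatch (illustrative values only, outside the driver):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t val = 0x12345678;		/* arbitrary expected fill */
    	uint32_t primed = val ^ 0xdeadbeaf;	/* non-zero XOR key */

    	/* XOR with a non-zero key always changes the value. */
    	assert(primed != val);
    	return 0;
    }
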
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
new file mode 100644
index 000000000000..94a15e3f6db8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/mock_gem_device.h"
+
+static int mock_phys_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ /* Create an object and bind it to a contiguous set of physical pages,
+ * i.e. exercise the i915_gem_object_phys API.
+ */
+
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
+ goto out_obj;
+ }
+
+ if (obj->ops != &i915_gem_phys_ops) {
+ pr_err("i915_gem_object_attach_phys did not create a phys object\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (!atomic_read(&obj->mm.pages_pin_count)) {
+ pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ /* Make the object dirty so that put_pages must copy the data back */
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
+ err);
+ goto out_obj;
+ }
+
+out_obj:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+int i915_gem_phys_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(mock_phys_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
new file mode 100644
index 000000000000..b232e6d2cd92
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_gem_utils.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
+#include "i915_request.h"
+
+struct i915_request *
+igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ ce = i915_gem_context_get_engine(ctx, engine->id);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+
+ return rq;
+}
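
igt_request_alloc() gives the selftests a direct replacement for the removed i915_request_alloc(engine, ctx) calls: look up the context's per-engine intel_context, create the request, drop the local reference. A typical call site, matching the conversions in live_nop_switch() and gpu_fill() above (fragment, not standalone):

    rq = igt_request_alloc(ctx, engine);
    if (IS_ERR(rq))
    	return PTR_ERR(rq);
    /* ... emit commands ... */
    i915_request_add(rq);
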
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
new file mode 100644
index 000000000000..0f17251cf75d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __IGT_GEM_UTILS_H__
+#define __IGT_GEM_UTILS_H__
+
+struct i915_request;
+struct i915_gem_context;
+struct intel_engine_cs;
+
+struct i915_request *
+igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 0426093bf1d9..be8974ccff24 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -1,35 +1,18 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#include "mock_context.h"
-#include "mock_gtt.h"
+#include "selftests/mock_gtt.h"
struct i915_gem_context *
mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -40,21 +23,22 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
- ctx->hw_contexts = RB_ROOT;
- spin_lock_init(&ctx->hw_contexts_lock);
+ mutex_init(&ctx->engines_mutex);
+ e = default_engines(ctx);
+ if (IS_ERR(e))
+ goto err_free;
+ RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
- goto err_handles;
+ goto err_engines;
if (name) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
@@ -64,12 +48,15 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
- __set_ppgtt(ctx, ppgtt);
+ __set_ppgtt(ctx, &ppgtt->vm);
+ i915_vm_put(&ppgtt->vm);
}
return ctx;
-err_handles:
+err_engines:
+ free_engines(rcu_access_pointer(ctx->engines));
+err_free:
kfree(ctx);
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
new file mode 100644
index 000000000000..0b926653914f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __MOCK_CONTEXT_H
+#define __MOCK_CONTEXT_H
+
+void mock_init_contexts(struct drm_i915_private *i915);
+
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name);
+
+void mock_context_close(struct i915_gem_context *ctx);
+
+struct i915_gem_context *
+live_context(struct drm_i915_private *i915, struct drm_file *file);
+
+struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
+void kernel_context_close(struct i915_gem_context *ctx);
+
+#endif /* !__MOCK_CONTEXT_H */
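
The header gathers the mock-context entry points in their new gem/selftests home. A hedged sketch of the lifecycle they support, following the shape of i915_gem_context_mock_selftests() above (error handling abbreviated):

    /* Sketch: mock device, named mock context, teardown. */
    struct drm_i915_private *i915 = mock_gem_device();
    struct i915_gem_context *ctx;

    if (!i915)
    	return -ENOMEM;

    ctx = mock_context(i915, "mock");	/* returns NULL on failure */
    if (ctx) {
    	/* ... exercise ctx ... */
    	mock_context_close(ctx);
    }
    drm_dev_put(&i915->drm);
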
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index ca682caf1062..b9e059d4328a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#include "mock_dmabuf.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
new file mode 100644
index 000000000000..f0f8bbd82dfc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __MOCK_DMABUF_H__
+#define __MOCK_DMABUF_H__
+
+#include <linux/dma-buf.h>
+
+struct mock_dmabuf {
+ int npages;
+ struct page *pages[];
+};
+
+static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+{
+ return buf->priv;
+}
+
+#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 20acdbee7bd0..370360b4a148 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -1,4 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
new file mode 100644
index 000000000000..1c75b5c9790c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/Makefile
@@ -0,0 +1,2 @@
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test
new file mode 100644
index 000000000000..61e06cbb4b32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(wildcard $(src)/*.h))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
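
For each header in gt/, the rules above generate a one-line translation unit that includes only that header; with CONFIG_DRM_I915_WERROR set these are compiled standalone, proving every header pulls in its own dependencies. For example, intel_context.h is wrapped as:

    /* header_test_intel_context.c, as emitted by cmd_header_test: */
    #include "intel_context.h"
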
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 832cb6b1e9bd..c092bdf5f0bf 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -81,6 +81,22 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
+__maybe_unused static bool
+check_signal_order(struct intel_context *ce, struct i915_request *rq)
+{
+ if (!list_is_last(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(rq->fence.seqno,
+ list_next_entry(rq, signal_link)->fence.seqno))
+ return false;
+
+ if (!list_is_first(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
+ rq->fence.seqno))
+ return false;
+
+ return true;
+}
+
static bool
__dma_fence_signal(struct dma_fence *fence)
{
@@ -130,6 +146,8 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
+
if (!__request_completed(rq))
break;
@@ -312,6 +330,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_add(&rq->signal_link, pos);
if (pos == &ce->signals) /* catch transitions from empty list */
list_move_tail(&ce->signal_link, &b->signalers);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
spin_unlock(&b->irq_lock);
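check_signal_order() above leans on i915_seqno_passed() for its neighbour comparisons; that helper is the driver's usual wraparound-safe u32 comparison, which can be sketched as:

    /* true if seq1 is at or after seq2, modulo u32 wraparound */
    static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
    {
        return (s32)(seq1 - seq2) >= 0;
    }

so the GEM_BUG_ON fires whenever adjacent requests on ce->signals are out of seqno order.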
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
new file mode 100644
index 000000000000..2c454f227c2e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -0,0 +1,241 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+
+#include "i915_drv.h"
+#include "i915_globals.h"
+
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+static struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_create(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+ return ce;
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return -EINTR;
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ intel_wakeref_t wakeref;
+
+ err = 0;
+ with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+ err = ce->ops->pin(ce);
+ if (err)
+ goto err;
+
+ i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ }
+
+ atomic_inc(&ce->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ mutex_unlock(&ce->pin_mutex);
+ return 0;
+
+err:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ /* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
+ mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+ if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ ce->ops->unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
+ intel_context_active_release(ce);
+ }
+
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
+
+static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
+{
+ int err;
+
+ err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
+
+ return 0;
+}
+
+static void __context_unpin_state(struct i915_vma *vma)
+{
+ vma->obj->pin_global--;
+ __i915_vma_unpin(vma);
+}
+
+static void intel_context_retire(struct i915_active *active)
+{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
+
+ if (ce->state)
+ __context_unpin_state(ce->state);
+
+ intel_context_put(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->cops);
+
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ i915_active_init(ctx->i915, &ce->active, intel_context_retire);
+}
+
+int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
+{
+ int err;
+
+ if (!i915_active_acquire(&ce->active))
+ return 0;
+
+ intel_context_get(ce);
+
+ if (!ce->state)
+ return 0;
+
+ err = __context_pin_state(ce->state, flags);
+ if (err) {
+ i915_active_cancel(&ce->active);
+ intel_context_put(ce);
+ return err;
+ }
+
+ /* Preallocate tracking nodes */
+ if (!i915_gem_context_is_kernel(ce->gem_context)) {
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err) {
+ i915_active_release(&ce->active);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
+
+void intel_context_enter_engine(struct intel_context *ce)
+{
+ intel_engine_pm_get(ce->engine);
+}
+
+void intel_context_exit_engine(struct intel_context *ce)
+{
+ intel_engine_pm_put(ce->engine);
+}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (unlikely(err))
+ return ERR_PTR(err);
+
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+
+ return rq;
+}
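The pin/unpin pair above follows the common atomic fast-path, mutex slow-path pattern: atomic_inc_not_zero() and atomic_add_unless() service the already-pinned case locklessly, while ce->pin_mutex serialises only the 0 <-> 1 transitions where ce->ops->pin()/unpin() must run. A minimal sketch of the bare pattern (struct obj and all names are hypothetical):

    int obj_acquire(struct obj *o)
    {
        if (atomic_inc_not_zero(&o->count))  /* fast path: already live */
            return 0;

        mutex_lock(&o->lock);                /* slow path: 0 -> 1 */
        if (!atomic_read(&o->count))
            o->backend_acquire(o);           /* runs at most once per cycle */
        atomic_inc(&o->count);
        mutex_unlock(&o->lock);
        return 0;
    }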
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
new file mode 100644
index 000000000000..a47275bc4f01
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+struct intel_context *
+intel_context_create(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+void intel_context_free(struct intel_context *ce);
+
+/**
+ * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
+ * @ce: the context
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+static inline int intel_context_lock_pinned(struct intel_context *ce)
+ __acquires(ce->pin_mutex)
+{
+ return mutex_lock_interruptible(&ce->pin_mutex);
+}
+
+/**
+ * intel_context_is_pinned - Reports the 'pinned' status
+ * @ce: the context
+ *
+ * While in use by the GPU, the context, along with its ring and page
+ * tables, is pinned into memory and the GTT.
+ *
+ * Returns: true if the context is currently pinned for use by the GPU.
+ */
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+/**
+ * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
+ * @ce: the context
+ *
+ * Releases the lock earlier acquired by intel_context_lock_pinned().
+ */
+static inline void intel_context_unlock_pinned(struct intel_context *ce)
+ __releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
+
+int __intel_context_do_pin(struct intel_context *ce);
+
+static inline int intel_context_pin(struct intel_context *ce)
+{
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return 0;
+
+ return __intel_context_do_pin(ce);
+}
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+void intel_context_enter_engine(struct intel_context *ce);
+void intel_context_exit_engine(struct intel_context *ce);
+
+static inline void intel_context_enter(struct intel_context *ce)
+{
+ if (!ce->active_count++)
+ ce->ops->enter(ce);
+}
+
+static inline void intel_context_mark_active(struct intel_context *ce)
+{
+ ++ce->active_count;
+}
+
+static inline void intel_context_exit(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->active_count);
+ if (!--ce->active_count)
+ ce->ops->exit(ce);
+}
+
+int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
+void intel_context_active_release(struct intel_context *ce);
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
+static inline int __must_check
+intel_context_timeline_lock(struct intel_context *ce)
+ __acquires(&ce->ring->timeline->mutex)
+{
+ return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+}
+
+static inline void intel_context_timeline_unlock(struct intel_context *ce)
+ __releases(&ce->ring->timeline->mutex)
+{
+ mutex_unlock(&ce->ring->timeline->mutex);
+}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce);
+
+#endif /* __INTEL_CONTEXT_H__ */
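As a usage sketch of the pinned-status lock (the caller and inspect() are hypothetical):

    err = intel_context_lock_pinned(ce);  /* interruptible, may fail */
    if (err)
        return err;
    if (intel_context_is_pinned(ce))
        inspect(ce);                      /* ce cannot be (un)bound here */
    intel_context_unlock_pinned(ce);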
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 339c7437fe82..08049ee91cee 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -10,11 +10,11 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/rbtree.h>
#include <linux/types.h>
#include "i915_active_types.h"
#include "intel_engine_types.h"
+#include "intel_sseu.h"
struct i915_gem_context;
struct i915_vma;
@@ -25,28 +25,20 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
+ void (*enter)(struct intel_context *ce);
+ void (*exit)(struct intel_context *ce);
+
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
- u8 slice_mask;
- u8 subslice_mask;
- u8 min_eus_per_subslice;
- u8 max_eus_per_subslice;
-};
-
struct intel_context {
struct kref ref;
struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_engine_cs *active;
+ struct intel_engine_cs *inflight;
- struct list_head active_link;
struct list_head signal_link;
struct list_head signals;
@@ -56,19 +48,18 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
+ unsigned int active_count; /* notionally protected by timeline->mutex */
+
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
- intel_engine_mask_t saturated; /* submitting semaphores too late? */
-
/**
- * active_tracker: Active tracker for the external rq activity
- * on this intel_context object.
+ * active: Active tracker for the rq activity (inc. external) on this
+ * intel_context object.
*/
- struct i915_active_request active_tracker;
+ struct i915_active active;
const struct intel_context_ops *ops;
- struct rb_node node;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 72c7c337ace9..2f1c6871ee95 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -52,6 +52,7 @@ struct drm_printer;
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
__ENGINE_REG_OP(read64_2x32, (engine__), \
@@ -68,6 +69,24 @@ struct drm_printer;
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+#define GEN6_RING_FAULT_REG_READ(engine__) \
+ intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
+ intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
+({ \
+ u32 __val; \
+\
+ __val = intel_uncore_read((engine__)->uncore, \
+ RING_FAULT_REG(engine__)); \
+ __val &= ~(clear__); \
+ __val |= (set__); \
+ intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
+ __val); \
+})
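+/*
+ * Usage sketch (hypothetical): acknowledge a sticky fault by clearing
+ * RING_FAULT_VALID while leaving the remaining status bits intact:
+ *   GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ */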
+
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
@@ -106,24 +125,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-static inline bool __execlists_need_preempt(int prio, int last)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, last);
-}
-
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -233,8 +234,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
-#define I915_GEM_HWS_HANGCHECK 0x34
-#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
@@ -362,14 +361,16 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-int intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engines_init_mmio(struct drm_i915_private *i915);
+int intel_engines_setup(struct drm_i915_private *i915);
+int intel_engines_init(struct drm_i915_private *i915);
+void intel_engines_cleanup(struct drm_i915_private *i915);
+
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_ring_submission_setup(struct intel_engine_cs *engine);
+int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -382,6 +383,8 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
+void intel_engine_init_execlists(struct intel_engine_cs *engine);
+
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -458,19 +461,12 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
{
if (engine->reset.reset)
engine->reset.reset(engine, stalled);
+ engine->serial++; /* contexts lost */
}
-void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
-
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-void intel_engine_lost_context(struct intel_engine_cs *engine);
-
-void intel_engines_park(struct drm_i915_private *i915);
-void intel_engines_unpark(struct drm_i915_private *i915);
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
@@ -547,6 +543,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
+u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -567,17 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
-static inline u32
-intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
-{
- return engine->hangcheck.next_seqno =
- next_pseudo_random32(engine->hangcheck.next_seqno);
-}
-
-static inline u32
-intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
-}
+void intel_engine_init_active(struct intel_engine_cs *engine,
+ unsigned int subclass);
+#define ENGINE_PHYSICAL 0
+#define ENGINE_MOCK 1
+#define ENGINE_VIRTUAL 2
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index eea9bec04f1b..7fd33e81c2d9 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -24,10 +24,15 @@
#include <drm/drm_print.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
-#include "i915_reset.h"
-#include "intel_ringbuffer.h"
+
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+#include "intel_context.h"
#include "intel_lrc.h"
+#include "intel_reset.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -48,35 +53,24 @@
struct engine_class_info {
const char *name;
- int (*init_legacy)(struct intel_engine_cs *engine);
- int (*init_execlists)(struct intel_engine_cs *engine);
-
u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
[RENDER_CLASS] = {
.name = "rcs",
- .init_execlists = logical_render_ring_init,
- .init_legacy = intel_init_render_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_blt_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_bsd_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_vebox_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
@@ -165,7 +159,7 @@ static const struct engine_info intel_engines[] = {
};
/**
- * ___intel_engine_context_size() - return the size of the context for an engine
+ * intel_engine_context_size() - return the size of the context for an engine
* @dev_priv: i915 device private
* @class: engine class
*
@@ -178,8 +172,7 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-static u32
-__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
u32 cxt_size;
@@ -212,6 +205,22 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
PAGE_SIZE);
case 5:
case 4:
+ /*
+ * There is a discrepancy here between the size reported
+ * by the register and the size of the context layout
+ * in the docs. Both are described as authoritative!
+ *
+ * The discrepancy is on the order of a few cachelines,
+ * but the total is under one page (4k), which is our
+ * minimum allocation anyway so it should all come
+ * out in the wash.
+ */
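+ /*
+ * Worked example (illustrative register value): a raw CXT_SIZE of
+ * 0x1f yields cxt_size = 32 cachelines = 2048 bytes, which
+ * round_up() below pads to the 4096-byte page minimum.
+ */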
+ cxt_size = I915_READ(CXT_SIZE) + 1;
+ DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+ INTEL_GEN(dev_priv),
+ cxt_size * 64,
+ cxt_size - 1);
+ return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
/* For the special day when i810 gets merged. */
@@ -312,10 +321,16 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class = info->class;
engine->instance = info->instance;
+ /*
+ * To be overridden by the backend on setup. However to facilitate
+ * cleanup on error during setup, we always provide the destroy vfunc.
+ */
+ engine->destroy = (typeof(engine->destroy))kfree;
+
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
- engine->context_size = __intel_engine_context_size(dev_priv,
- engine->class);
+ engine->context_size = intel_engine_context_size(dev_priv,
+ engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
@@ -336,18 +351,70 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
return 0;
}
+static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (engine->class == VIDEO_DECODE_CLASS) {
+ /*
+ * HEVC support is present on first engine instance
+ * before Gen11 and on all instances afterwards.
+ */
+ if (INTEL_GEN(i915) >= 11 ||
+ (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_CLASS_CAPABILITY_HEVC;
+
+ /*
+ * SFC block is present only on even logical engine
+ * instances.
+ */
+ if ((INTEL_GEN(i915) >= 11 &&
+ RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+ (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
+ if (INTEL_GEN(i915) >= 9)
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ }
+}
+
+static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ __setup_engine_capabilities(engine);
+}
+
+/**
+ * intel_engines_cleanup() - free the resources allocated for Command Streamers
+ * @i915: the i915 device
+ */
+void intel_engines_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ engine->destroy(engine);
+ i915->engine[id] = NULL;
+ }
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @dev_priv: i915 device private
+ * @i915: the i915 device
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
+int intel_engines_init_mmio(struct drm_i915_private *i915)
{
- struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct intel_device_info *device_info = mkwrite_device_info(i915);
+ const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
unsigned int i;
int err;
@@ -360,10 +427,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(dev_priv, i))
+ if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(dev_priv, i);
+ err = intel_engine_setup(i915, i);
if (err)
goto cleanup;
@@ -379,69 +446,52 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
+ if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
err = -ENODEV;
goto cleanup;
}
- RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
+ RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- i915_check_and_clear_faults(dev_priv);
+ i915_check_and_clear_faults(i915);
+
+ intel_setup_engine_capabilities(i915);
return 0;
cleanup:
- for_each_engine(engine, dev_priv, id)
- kfree(engine);
+ intel_engines_cleanup(i915);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *dev_priv)
+int intel_engines_init(struct drm_i915_private *i915)
{
+ int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
- enum intel_engine_id id, err_id;
+ enum intel_engine_id id;
int err;
- for_each_engine(engine, dev_priv, id) {
- const struct engine_class_info *class_info =
- &intel_engine_classes[engine->class];
- int (*init)(struct intel_engine_cs *engine);
-
- if (HAS_EXECLISTS(dev_priv))
- init = class_info->init_execlists;
- else
- init = class_info->init_legacy;
-
- err = -EINVAL;
- err_id = id;
-
- if (GEM_DEBUG_WARN_ON(!init))
- goto cleanup;
+ if (HAS_EXECLISTS(i915))
+ init = intel_execlists_submission_init;
+ else
+ init = intel_ring_submission_init;
+ for_each_engine(engine, i915, id) {
err = init(engine);
if (err)
goto cleanup;
-
- GEM_BUG_ON(!engine->submit_request);
}
return 0;
cleanup:
- for_each_engine(engine, dev_priv, id) {
- if (id >= err_id) {
- kfree(engine);
- dev_priv->engine[id] = NULL;
- } else {
- dev_priv->gt.cleanup_engine(engine);
- }
- }
+ intel_engines_cleanup(i915);
return err;
}
@@ -450,7 +500,7 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
-static void intel_engine_init_execlist(struct intel_engine_cs *engine)
+void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -477,7 +527,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
- __i915_gem_object_release_unless_active(vma->obj);
+ i915_gem_object_put(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
@@ -557,41 +607,71 @@ err:
return ret;
}
+static int intel_engine_setup_common(struct intel_engine_cs *engine)
+{
+ int err;
+
+ init_llist_head(&engine->barrier_tasks);
+
+ err = init_status_page(engine);
+ if (err)
+ return err;
+
+ intel_engine_init_active(engine, ENGINE_PHYSICAL);
+ intel_engine_init_breadcrumbs(engine);
+ intel_engine_init_execlists(engine);
+ intel_engine_init_hangcheck(engine);
+ intel_engine_init_batch_pool(engine);
+ intel_engine_init_cmd_parser(engine);
+ intel_engine_init__pm(engine);
+
+ /* Use the whole device by default */
+ engine->sseu =
+ intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+
+ return 0;
+}
+
/**
- * intel_engines_setup_common - setup engine state not requiring hw access
- * @engine: Engine to setup.
+ * intel_engines_setup - set up engine state not requiring hw access
+ * @i915: Device to setup.
*
- * Initializes @engine@ structure members shared between legacy and execlists
+ * Initializes engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
-int intel_engine_setup_common(struct intel_engine_cs *engine)
+int intel_engines_setup(struct drm_i915_private *i915)
{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err;
- err = init_status_page(engine);
- if (err)
- return err;
+ if (HAS_EXECLISTS(i915))
+ setup = intel_execlists_submission_setup;
+ else
+ setup = intel_ring_submission_setup;
- err = i915_timeline_init(engine->i915,
- &engine->timeline,
- engine->status_page.vma);
- if (err)
- goto err_hwsp;
+ for_each_engine(engine, i915, id) {
+ err = intel_engine_setup_common(engine);
+ if (err)
+ goto cleanup;
- i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+ err = setup(engine);
+ if (err)
+ goto cleanup;
- intel_engine_init_breadcrumbs(engine);
- intel_engine_init_execlist(engine);
- intel_engine_init_hangcheck(engine);
- intel_engine_init_batch_pool(engine);
- intel_engine_init_cmd_parser(engine);
+ /* We expect the backend to take control over its state */
+ GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
+
+ GEM_BUG_ON(!engine->cops);
+ }
return 0;
-err_hwsp:
- cleanup_status_page(engine);
+cleanup:
+ intel_engines_cleanup(i915);
return err;
}
@@ -675,6 +755,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
i915_timeline_unpin(&frame->timeline);
@@ -690,15 +771,42 @@ static int pin_context(struct i915_gem_context *ctx,
struct intel_context **out)
{
struct intel_context *ce;
+ int err;
- ce = intel_context_pin(ctx, engine);
+ ce = i915_gem_context_get_engine(ctx, engine->id);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ err = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (err)
+ return err;
+
*out = ce;
return 0;
}
+void
+intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
+{
+ INIT_LIST_HEAD(&engine->active.requests);
+
+ spin_lock_init(&engine->active.lock);
+ lockdep_set_subclass(&engine->active.lock, subclass);
+
+ /*
+ * Due to an interesting quirk in lockdep's internal debug tracking,
+ * after setting a subclass we must ensure the lock is used. Otherwise,
+ * nr_unused_locks is incremented once too often.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ local_irq_disable();
+ lock_map_acquire(&engine->active.lock.dep_map);
+ lock_map_release(&engine->active.lock.dep_map);
+ local_irq_enable();
+#endif
+}
+
/**
 * intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -753,30 +861,6 @@ err_unpin:
return ret;
}
-void intel_gt_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * After resume, we may need to poke into the pinned kernel
- * contexts to paper over any damage caused by the sudden suspend.
- * Only the kernel contexts should remain pinned over suspend,
- * allowing us to fixup the user contexts on their first pin.
- */
- for_each_engine(engine, i915, id) {
- struct intel_context *ce;
-
- ce = engine->kernel_context;
- if (ce)
- ce->ops->reset(ce);
-
- ce = engine->preempt_context;
- if (ce)
- ce->ops->reset(ce);
- }
-}
-
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
@@ -786,6 +870,8 @@ void intel_gt_resume(struct drm_i915_private *i915)
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ GEM_BUG_ON(!list_empty(&engine->active.requests));
+
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -798,8 +884,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
-
- i915_timeline_fini(&engine->timeline);
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -900,11 +985,12 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
return mcr_s_ss_select;
}
-static inline u32
-read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
- int subslice, i915_reg_t reg)
+static u32
+read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
+ i915_reg_t reg)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@@ -912,7 +998,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
u32 ret;
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
@@ -924,7 +1010,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
- default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
@@ -961,7 +1047,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -969,7 +1055,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
memset(instdone, 0, sizeof(*instdone));
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(i915)) {
default:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
@@ -979,12 +1065,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, slice, subslice) {
instdone->sampler[slice][subslice] =
- read_subslice_reg(dev_priv, slice, subslice,
+ read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
instdone->row[slice][subslice] =
- read_subslice_reg(dev_priv, slice, subslice,
+ read_subslice_reg(engine, slice, subslice,
GEN7_ROW_INSTDONE);
}
break;
@@ -1030,7 +1116,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return true;
@@ -1044,7 +1130,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return idle;
}
@@ -1062,10 +1148,15 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (i915_reset_failed(engine->i915))
return true;
+ if (!intel_wakeref_active(&engine->wakeref))
+ return true;
+
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
+ synchronize_hardirq(engine->i915->drm.irq);
+
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1123,137 +1214,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
-static bool reset_engines(struct drm_i915_private *i915)
-{
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- return false;
-
- return intel_gpu_reset(i915, ALL_ENGINES) == 0;
-}
-
-/**
- * intel_engines_sanitize: called after the GPU has lost power
- * @i915: the i915 device
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_engines_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- GEM_TRACE("\n");
-
- if (!reset_engines(i915) && !force)
- return;
-
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, false);
-}
-
-/**
- * intel_engines_park: called when the GT is transitioning from busy->idle
- * @i915: the i915 device
- *
- * The GT is now idle and about to go to sleep (maybe never to wake again?).
- * Time for us to tidy and put away our toys (release resources back to the
- * system).
- */
-void intel_engines_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- /* Flush the residual irq tasklets first. */
- intel_engine_disarm_breadcrumbs(engine);
- tasklet_kill(&engine->execlists.tasklet);
-
- /*
- * We are committed now to parking the engines, make sure there
- * will be no more interrupts arriving later and the engines
- * are truly idle.
- */
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- dev_err(i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
-
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
-
- if (engine->park)
- engine->park(engine);
-
- if (engine->pinned_default_state) {
- i915_gem_object_unpin_map(engine->default_state);
- engine->pinned_default_state = NULL;
- }
-
- i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->execlists.no_priolist = false;
- }
-
- i915->gt.active_engines = 0;
-}
-
-/**
- * intel_engines_unpark: called when the GT is transitioning from idle->busy
- * @i915: the i915 device
- *
- * The GT was idle and now about to fire up with some new user requests.
- */
-void intel_engines_unpark(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- void *map;
-
- /* Pin the default state for fast resets from atomic context. */
- map = NULL;
- if (engine->default_state)
- map = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR_OR_NULL(map))
- engine->pinned_default_state = map;
-
- if (engine->unpark)
- engine->unpark(engine);
-
- intel_engine_init_hangcheck(engine);
- }
-}
-
-/**
- * intel_engine_lost_context: called when the GPU is reset into unknown state
- * @engine: the engine
- *
- * We have either reset the GPU or otherwise about to lose state tracking of
- * the current GPU logical state (e.g. suspend). On next use, it is therefore
- * imperative that we make no presumptions about the current state and load
- * from scratch.
- */
-void intel_engine_lost_context(struct intel_engine_cs *engine)
-{
- struct intel_context *ce;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-}
-
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1312,8 +1272,11 @@ static void print_request(struct drm_printer *m,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags) ? "+" :
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "+" : "",
+ &rq->fence.flags) ? "-" :
+ "",
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@@ -1348,12 +1311,13 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *dev_priv = engine->i915;
const struct intel_engine_execlists * const execlists =
&engine->execlists;
+ unsigned long flags;
u64 addr;
if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1434,15 +1398,16 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
- rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
struct i915_request *rq;
unsigned int count;
+ char hdr[80];
rq = port_unpack(&execlists->port[idx], &count);
- if (rq) {
- char hdr[80];
-
+ if (!rq) {
+ drm_printf(m, "\t\tELSP[%d] idle\n", idx);
+ } else if (!i915_request_signaled(rq)) {
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
@@ -1451,11 +1416,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n", idx);
+ snprintf(hdr, sizeof(hdr), "\t\tELSP[%d] rq: ", idx);
+ print_request(m, rq, hdr);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
- rcu_read_unlock();
+ spin_unlock_irqrestore(&engine->active.lock, flags);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1518,9 +1483,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
- engine->hangcheck.last_seqno,
- engine->hangcheck.next_seqno,
+ drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+ drm_printf(m, "\tHangcheck: %d ms ago\n",
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@@ -1530,16 +1494,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tRequests:\n");
- rq = list_first_entry(&engine->timeline.requests,
- struct i915_request, link);
- if (&rq->link != &engine->timeline.requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline.requests,
- struct i915_request, link);
- if (&rq->link != &engine->timeline.requests)
- print_request(m, rq, "\t\tlast ");
-
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1562,10 +1516,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_unlock();
- wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915, wakeref);
+ intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
@@ -1620,7 +1574,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
@@ -1646,7 +1600,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
unlock:
write_sequnlock(&engine->stats.lock);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
return err;
}
@@ -1731,27 +1685,26 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
- list_for_each_entry(request, &engine->timeline.requests, link) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
if (!i915_request_started(request))
- break;
+ continue;
/* More than one preemptible request may match! */
if (!match_ring(request))
- break;
+ continue;
active = request;
break;
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_engine.c"
-#include "selftests/intel_engine_cs.c"
+#include "selftest_engine_cs.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
new file mode 100644
index 000000000000..2ce00d3dc42a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -0,0 +1,168 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+
+static int __engine_unpark(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+ void *map;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ intel_gt_pm_get(engine->i915);
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
+ if (engine->unpark)
+ engine->unpark(engine);
+
+ intel_engine_init_hangcheck(engine);
+ return 0;
+}
+
+void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
+}
+
+void intel_engine_park(struct intel_engine_cs *engine)
+{
+ /*
+ * We are now committed to parking this engine; make sure there
+ * will be no more interrupts arriving later and the engine
+ * is truly idle.
+ */
+ if (wait_for(intel_engine_is_idle(engine), 10)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ dev_err(engine->i915->drm.dev,
+ "%s is not idle before parking\n",
+ engine->name);
+ intel_engine_dump(engine, &p, NULL);
+ }
+}
+
+static bool switch_to_kernel_context(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /* Already inside the kernel context, safe to power down. */
+ if (engine->wakeref_serial == engine->serial)
+ return true;
+
+ /* GPU is pointing to the void, as good as in the kernel context. */
+ if (i915_reset_failed(engine->i915))
+ return true;
+
+ /*
+ * Note, we do this without taking the timeline->mutex. We cannot
+ * as we may be called while retiring the kernel context and so
+ * already underneath the timeline->mutex. Instead we rely on the
+ * exclusive property of __engine_park(), which prevents anyone
+ * else from creating a request on this engine. This also requires
+ * that the ring is empty and we avoid any waits while constructing
+ * the context, as they assume protection by the timeline->mutex.
+ * This should hold true as we can only park the engine after
+ * retiring the last request, thus all rings should be empty and
+ * all timelines idle.
+ */
+ rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
+ if (IS_ERR(rq))
+ /* Context switch failed, hope for the best! Maybe reset? */
+ return true;
+
+ /* Check again on the next retirement. */
+ engine->wakeref_serial = engine->serial + 1;
+
+ i915_request_add_barriers(rq);
+ __i915_request_commit(rq);
+
+ return false;
+}
+
+static int __engine_park(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+
+ engine->saturated = 0;
+
+ /*
+ * If one and only one request is completed between pm events,
+ * we know that we are inside the kernel context and it is
+ * safe to power down. (We are paranoid in case that runtime
+ * suspend causes corruption to the active context image, and
+ * want to avoid that impacting userspace.)
+ */
+ if (!switch_to_kernel_context(engine))
+ return -EBUSY;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ intel_engine_disarm_breadcrumbs(engine);
+
+ /* Must be reset upon idling, or we may miss the busy wakeup. */
+ GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
+
+ if (engine->park)
+ engine->park(engine);
+
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
+ engine->execlists.no_priolist = false;
+
+ intel_gt_pm_put(engine->i915);
+ return 0;
+}
+
+void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
+}
+
+void intel_engine_init__pm(struct intel_engine_cs *engine)
+{
+ intel_wakeref_init(&engine->wakeref);
+}
+
+int intel_engines_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ intel_gt_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ intel_engine_pm_get(engine);
+ engine->serial++; /* kernel context lost */
+ err = engine->resume(engine);
+ intel_engine_pm_put(engine);
+ if (err) {
+ dev_err(i915->drm.dev,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ break;
+ }
+ }
+ intel_gt_pm_put(i915);
+
+ return err;
+}
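Engine power management is reference counted through these helpers. A hedged usage sketch (the MMIO access is a placeholder):

    intel_engine_pm_get(engine);   /* first get runs __engine_unpark() */
    /* ... touch engine registers, build kernel requests, ... */
    intel_engine_pm_put(engine);   /* last put attempts __engine_park() */

Note that __engine_park() can veto the power-down with -EBUSY until the engine has idled back into the kernel context.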
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
new file mode 100644
index 000000000000..b326cd993d60
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_PM_H
+#define INTEL_ENGINE_PM_H
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+void intel_engine_pm_get(struct intel_engine_cs *engine);
+void intel_engine_pm_put(struct intel_engine_cs *engine);
+
+void intel_engine_park(struct intel_engine_cs *engine);
+
+void intel_engine_init__pm(struct intel_engine_cs *engine);
+
+int intel_engines_resume(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 1f970c76b6a6..868b220214f8 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -11,23 +11,26 @@
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/types.h>
#include "i915_gem.h"
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
+#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
-#include "i915_gem_batch_pool.h"
-#include "i915_pmu.h"
-
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
#define I915_CMD_HASH_ORDER 9
struct dma_fence;
+struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
@@ -52,8 +55,8 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
- u32 last_seqno;
- u32 next_seqno;
+ u32 last_ring;
+ u32 last_head;
unsigned long action_timestamp;
struct intel_instdone instdone;
};
@@ -226,6 +229,7 @@ struct intel_engine_execlists {
* @queue: queue of requests, in priority lists
*/
struct rb_root_cached queue;
+ struct rb_root_cached virtual;
/**
* @csb_write: control register for Context Switch buffer
@@ -278,13 +282,28 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ u32 uabi_capabilities;
+
+ struct intel_sseu sseu;
+
struct intel_ring *buffer;
- struct i915_timeline timeline;
+ struct {
+ spinlock_t lock;
+ struct list_head requests;
+ } active;
+
+ struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
+ unsigned long serial;
+
+ unsigned long wakeref_serial;
+ struct intel_wakeref wakeref;
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
@@ -357,7 +376,7 @@ struct intel_engine_cs {
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
- int (*init_hw)(struct intel_engine_cs *engine);
+ int (*resume)(struct intel_engine_cs *engine);
struct {
void (*prepare)(struct intel_engine_cs *engine);
@@ -398,6 +417,13 @@ struct intel_engine_cs {
void (*submit_request)(struct i915_request *rq);
/*
+ * Called on signaling of a SUBMIT_FENCE, passing along the signaling
+ * request down to the bonded pairs.
+ */
+ void (*bond_execute)(struct i915_request *rq,
+ struct dma_fence *signal);
+
+ /*
* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
@@ -413,21 +439,10 @@ struct intel_engine_cs {
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
- void (*cleanup)(struct intel_engine_cs *engine);
+ void (*destroy)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
- /* Contexts are pinned whilst they are active on the GPU. The last
- * context executed remains active whilst the GPU is idle - the
- * switch away and write to the context object only occurs on the
- * next execution. Contexts are only unpinned on retirement of the
- * following request ensuring that we can always write to the object
- * on the context switch even after idling. Across suspend, we switch
- * to the kernel context and trash it as the save may not happen
- * before the hardware is powered down.
- */
- struct intel_context *last_retired_context;
-
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
@@ -438,6 +453,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+#define I915_ENGINE_IS_VIRTUAL BIT(5)
unsigned int flags;
/*
@@ -527,6 +543,12 @@ intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
+static inline bool
+intel_engine_is_virtual(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_IS_VIRTUAL;
+}
+
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index a34ece53a771..eec31e36aca7 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -180,6 +180,7 @@
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
new file mode 100644
index 000000000000..7b5967751762
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -0,0 +1,143 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt_pm.h"
+#include "intel_pm.h"
+#include "intel_wakeref.h"
+
+static void pm_notify(struct drm_i915_private *i915, int state)
+{
+ blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+}
+
+static int intel_gt_unpark(struct intel_wakeref *wf)
+{
+ struct drm_i915_private *i915 =
+ container_of(wf, typeof(*i915), gt.wakeref);
+
+ GEM_TRACE("\n");
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has negative impact on the performance of the chip with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!i915->gt.awake);
+
+ intel_enable_gt_powersave(i915);
+
+ i915_update_gfx_val(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_busy(i915);
+
+ i915_pmu_gt_unparked(i915);
+
+ i915_queue_hangcheck(i915);
+
+ pm_notify(i915, INTEL_GT_UNPARK);
+
+ return 0;
+}
+
+void intel_gt_pm_get(struct drm_i915_private *i915)
+{
+ intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+}
+
+static int intel_gt_park(struct intel_wakeref *wf)
+{
+ struct drm_i915_private *i915 =
+ container_of(wf, typeof(*i915), gt.wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+
+ GEM_TRACE("\n");
+
+ pm_notify(i915, INTEL_GT_PARK);
+
+ i915_pmu_gt_parked(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_idle(i915);
+
+ GEM_BUG_ON(!wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+
+ return 0;
+}
+
+void intel_gt_pm_put(struct drm_i915_private *i915)
+{
+ intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+}
+
+void intel_gt_pm_init(struct drm_i915_private *i915)
+{
+ intel_wakeref_init(&i915->gt.wakeref);
+ BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+}
+
+static bool reset_engines(struct drm_i915_private *i915)
+{
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
+/**
+ * intel_gt_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_gt_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ GEM_TRACE("\n");
+
+ if (!reset_engines(i915) && !force)
+ return;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_reset(engine, false);
+}
+
+void intel_gt_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ ce = engine->kernel_context;
+ if (ce)
+ ce->ops->reset(ce);
+
+ ce = engine->preempt_context;
+ if (ce)
+ ce->ops->reset(ce);
+ }
+}
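
Aside: intel_gt_pm_get()/intel_gt_pm_put() above lean entirely on the intel_wakeref refcount, which invokes the unpark callback only on the 0 -> 1 transition and park only on 1 -> 0. A minimal userspace sketch of that pattern follows (the real intel_wakeref also serialises these transitions under a mutex and ties them to runtime PM; all names below are illustrative, not the kernel API):

/*
 * Illustrative model only. The real intel_wakeref takes a mutex around
 * the 0 <-> 1 transitions so park/unpark cannot race, and holds a
 * runtime-pm reference for the duration.
 */
#include <stdatomic.h>
#include <stdio.h>

struct wakeref {
        atomic_int count;
        void (*unpark)(void);   /* first-user callback, cf. intel_gt_unpark */
        void (*park)(void);     /* last-user callback, cf. intel_gt_park */
};

static void wakeref_get(struct wakeref *wf)
{
        if (atomic_fetch_add(&wf->count, 1) == 0)
                wf->unpark();   /* 0 -> 1: power the GT up */
}

static void wakeref_put(struct wakeref *wf)
{
        if (atomic_fetch_sub(&wf->count, 1) == 1)
                wf->park();     /* 1 -> 0: power the GT down */
}

static void unpark(void) { puts("unpark"); }
static void park(void) { puts("park"); }

int main(void)
{
        struct wakeref wf = { .unpark = unpark, .park = park };

        wakeref_get(&wf);       /* prints "unpark" */
        wakeref_get(&wf);       /* nested: no callback */
        wakeref_put(&wf);       /* still held: no callback */
        wakeref_put(&wf);       /* prints "park" */
        return 0;
}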
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
new file mode 100644
index 000000000000..7dd1130a19a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_H
+#define INTEL_GT_PM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+enum {
+ INTEL_GT_UNPARK,
+ INTEL_GT_PARK,
+};
+
+void intel_gt_pm_get(struct drm_i915_private *i915);
+void intel_gt_pm_put(struct drm_i915_private *i915);
+
+void intel_gt_pm_init(struct drm_i915_private *i915);
+
+void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
+void intel_gt_resume(struct drm_i915_private *i915);
+
+#endif /* INTEL_GT_PM_H */
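
The INTEL_GT_UNPARK/INTEL_GT_PARK states declared here are broadcast by pm_notify() to anyone registered on gt.pm_notifications. The kernel side uses blocking_notifier_call_chain(); the userspace sketch below only shows the shape of the broadcast, with illustrative names:

#include <stddef.h>
#include <stdio.h>

enum { GT_UNPARK, GT_PARK };    /* cf. INTEL_GT_UNPARK/INTEL_GT_PARK */

typedef void (*pm_listener)(int state);

static pm_listener listeners[8];
static size_t nr_listeners;

static void pm_register(pm_listener fn)
{
        if (nr_listeners < sizeof(listeners) / sizeof(listeners[0]))
                listeners[nr_listeners++] = fn;
}

static void pm_notify(int state)
{
        for (size_t i = 0; i < nr_listeners; i++)
                listeners[i](state);    /* called in registration order */
}

static void on_pm(int state)
{
        puts(state == GT_UNPARK ? "GT unparked" : "GT parked");
}

int main(void)
{
        pm_register(on_pm);
        pm_notify(GT_UNPARK);
        pm_notify(GT_PARK);
        return 0;
}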
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 3d51ed1428d4..6bcfa6456c45 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -22,12 +22,13 @@
*
*/
+#include "intel_reset.h"
#include "i915_drv.h"
-#include "i915_reset.h"
struct hangcheck {
u64 acthd;
- u32 seqno;
+ u32 ring;
+ u32 head;
enum intel_engine_hangcheck_action action;
unsigned long action_timestamp;
int deadlock;
@@ -133,26 +134,31 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
- hc->seqno = intel_engine_get_hangcheck_seqno(engine);
+ hc->ring = ENGINE_READ(engine, RING_START);
+ hc->head = ENGINE_READ(engine, RING_HEAD);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_seqno = hc->seqno;
+ engine->hangcheck.last_ring = hc->ring;
+ engine->hangcheck.last_head = hc->head;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
- if (engine->hangcheck.last_seqno != hc->seqno)
- return ENGINE_ACTIVE_SEQNO;
-
if (intel_engine_is_idle(engine))
return ENGINE_IDLE;
+ if (engine->hangcheck.last_ring != hc->ring)
+ return ENGINE_ACTIVE_SEQNO;
+
+ if (engine->hangcheck.last_head != hc->head)
+ return ENGINE_ACTIVE_SEQNO;
+
return engine_stuck(engine, hc->acthd);
}
@@ -217,8 +223,8 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
}
static void hangcheck_declare_hang(struct drm_i915_private *i915,
- unsigned int hung,
- unsigned int stuck)
+ intel_engine_mask_t hung,
+ intel_engine_mask_t stuck)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
@@ -253,9 +259,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
+ intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0, wedged = 0;
+ intel_wakeref_t wakeref;
if (!i915_modparams.enable_hangcheck)
return;
@@ -266,6 +273,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (i915_terminally_wedged(dev_priv))
return;
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ if (!wakeref)
+ return;
+
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
@@ -313,6 +324,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
/* Reset timer in case GPU hangs without another request being added */
i915_queue_hangcheck(dev_priv);
}
@@ -330,5 +343,5 @@ void intel_hangcheck_init(struct drm_i915_private *i915)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_hangcheck.c"
+#include "selftest_hangcheck.c"
#endif
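
The rewritten sampler above infers progress from RING_START/RING_HEAD rather than a software seqno: if either register moved between two samples, requests are still flowing. A compilable userspace model of that decision, with loosely mirrored names (engine_stuck()'s ACTHD heuristics are reduced here to a plain comparison):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hc_sample {
        uint64_t acthd;         /* cf. intel_engine_get_active_head() */
        uint32_t ring;          /* RING_START */
        uint32_t head;          /* RING_HEAD */
};

enum hc_action { HC_IDLE, HC_ACTIVE, HC_SUSPECT_HANG };

static enum hc_action
hc_compare(const struct hc_sample *last, const struct hc_sample *now,
           bool engine_idle)
{
        if (engine_idle)
                return HC_IDLE;

        /* A new ring base or a moving head means requests are flowing. */
        if (last->ring != now->ring || last->head != now->head)
                return HC_ACTIVE;

        /* Same ring, same head: fall back to ACTHD-based heuristics. */
        return last->acthd != now->acthd ? HC_ACTIVE : HC_SUSPECT_HANG;
}

int main(void)
{
        struct hc_sample a = { .acthd = 0x100, .ring = 0x1000, .head = 0x40 };
        struct hc_sample b = a;

        printf("%d\n", hc_compare(&a, &b, false));      /* 2: suspect hang */
        b.head = 0x80;
        printf("%d\n", hc_compare(&a, &b, false));      /* 1: active */
        return 0;
}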
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 11e5a86610bf..b42b5f158295 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,13 +133,15 @@
*/
#include <linux/interrupt.h>
-#include <drm/i915_drm.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gem_render_state.h"
-#include "i915_reset.h"
#include "i915_vgpu.h"
+#include "intel_engine_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
+#include "intel_reset.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -164,7 +166,53 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep a rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of the sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[0];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
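
The trailing siblings[] array makes struct virtual_engine a single variable-sized allocation, created later with kzalloc(struct_size(ve, siblings, count), GFP_KERNEL). A userspace sketch of the same idiom (struct_size() additionally guards against multiplication overflow, which the open-coded arithmetic below does not):

#include <stdlib.h>
#include <string.h>

struct engine { unsigned int mask; };

struct virtual_model {
        unsigned int num_siblings;
        struct engine *siblings[];      /* flexible array member */
};

static struct virtual_model *
virtual_model_create(struct engine **siblings, unsigned int count)
{
        struct virtual_model *ve;

        /* kernel: kzalloc(struct_size(ve, siblings, count), GFP_KERNEL) */
        ve = calloc(1, sizeof(*ve) + count * sizeof(ve->siblings[0]));
        if (!ve)
                return NULL;

        memcpy(ve->siblings, siblings, count * sizeof(*siblings));
        ve->num_siblings = count;
        return ve;
}

int main(void)
{
        struct engine e0 = { .mask = 0x1 }, e1 = { .mask = 0x2 };
        struct engine *s[] = { &e0, &e1 };
        struct virtual_model *ve = virtual_model_create(s, 2);

        free(ve);
        return !ve;
}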
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
@@ -189,23 +237,12 @@ static int effective_prio(const struct i915_request *rq)
/*
* On unwinding the active request, we give it a priority bump
- * equivalent to a freshly submitted request. This protects it from
- * being gazumped again, but it would be preferable if we didn't
- * let it be gazumped in the first place!
- *
- * See __unwind_incomplete_requests()
+ * if it has completed waiting on any semaphore. If we know that
+ * the request has already started, we can prevent an unwanted
+ * preempt-to-idle cycle by taking that into account now.
*/
- if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
- /*
- * After preemption, we insert the active request at the
- * end of the new priority level. This means that we will be
- * _lower_ priority than the preemptee all things equal (and
- * so the preemption is valid), so adjust our comparison
- * accordingly.
- */
- prio |= ACTIVE_PRIORITY;
- prio--;
- }
+ if (__i915_request_has_started(rq))
+ prio |= I915_PRIORITY_NOSEMAPHORE;
/* Restrict mere WAIT boosts from triggering preemption */
return prio | __NO_PREEMPTION;
@@ -229,7 +266,8 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
}
static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
+ const struct i915_request *rq,
+ struct rb_node *rb)
{
int last_prio;
@@ -252,18 +290,37 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* ourselves, ignore the request.
*/
last_prio = effective_prio(rq);
- if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
+ last_prio))
return false;
/*
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
- if (!list_is_last(&rq->link, &engine->timeline.requests) &&
- rq_prio(list_next_entry(rq, link)) > last_prio)
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
+ if (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ bool preempt = false;
+
+ if (engine == ve->siblings[0]) { /* only preempt one sibling */
+ struct i915_request *next;
+
+ rcu_read_lock();
+ next = READ_ONCE(ve->request);
+ if (next)
+ preempt = rq_prio(next) > last_prio;
+ rcu_read_unlock();
+ }
+
+ if (preempt)
+ return preempt;
+ }
+
/*
* If the inflight context did not trigger the preemption, then maybe
* it was the set of queued requests? Pick the highest priority in
@@ -375,55 +432,46 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
+ int prio = I915_PRIORITY_INVALID;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry_safe_reverse(rq, rn,
- &engine->timeline.requests,
- link) {
+ &engine->active.requests,
+ sched.link) {
+ struct intel_engine_cs *owner;
+
if (i915_request_completed(rq))
break;
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->hw_context->active);
-
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(rq->hw_context->inflight);
- list_add(&rq->sched.link, pl);
-
- active = rq;
- }
+ /*
+ * Push the request back into the queue for later resubmission.
+ * If this request is not native to this physical engine (i.e.
+ * it came from a virtual source), push it back onto the virtual
+ * engine so that it can be moved across onto another physical
+ * engine as load dictates.
+ */
+ owner = rq->hw_context->engine;
+ if (likely(owner == engine)) {
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- /*
- * The active request is now effectively the start of a new client
- * stream, so give it the equivalent small priority bump to prevent
- * it being gazumped a second time by another peer.
- *
- * Note we have to be careful not to apply a priority boost to a request
- * still spinning on its semaphores. If the request hasn't started, that
- * means it is still waiting for its dependencies to be signaled, and
- * if we apply a priority boost to this request, we will boost it past
- * its signalers and so break PI.
- *
- * One consequence of this preemption boost is that we may jump
- * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
- * making those priorities non-preemptible. They will be moved forward
- * in the priority queue, but they will not gain immediate access to
- * the GPU.
- */
- if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
- prio |= ACTIVE_PRIORITY;
- active->sched.attr.priority = prio;
- list_move_tail(&active->sched.link,
- i915_sched_lookup_priolist(engine, prio));
+ list_move(&rq->sched.link, pl);
+ active = rq;
+ } else {
+ rq->engine = owner;
+ owner->submit_request(rq);
+ active = NULL;
+ }
}
return active;
@@ -468,20 +516,41 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
- GEM_BUG_ON(rq->hw_context->active);
+ GEM_BUG_ON(rq->hw_context->inflight);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
- rq->hw_context->active = rq->engine;
+ rq->hw_context->inflight = rq->engine;
+}
+
+static void kick_siblings(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+ struct i915_request *next = READ_ONCE(ve->request);
+
+ if (next && next->execution_mask & ~rq->execution_mask)
+ tasklet_schedule(&ve->base.execlists.tasklet);
}
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
- rq->hw_context->active = NULL;
+ rq->hw_context->inflight = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
+
+ /*
+ * If this is part of a virtual engine, its next request may have
+ * been blocked waiting for access to the active context. We have
+ * to kick all the siblings again in case we need to switch (e.g.
+ * the next request is not runnable on this engine). Hopefully,
+ * we will already have submitted the next request before the
+ * tasklet runs and do not need to rebuild each virtual tree
+ * and kick everyone again.
+ */
+ if (rq->engine != rq->hw_context->engine)
+ kick_siblings(rq);
}
static u64 execlists_update_context(struct i915_request *rq)
@@ -535,7 +604,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!engine->i915->gt.awake);
+ GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -659,6 +728,93 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
execlists));
}
+static void virtual_update_register_offsets(u32 *regs,
+ struct intel_engine_cs *engine)
+{
+ u32 base = engine->mmio_base;
+
+ /* Must match execlists_init_reg_state()! */
+
+ regs[CTX_CONTEXT_CONTROL] =
+ i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
+ regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
+ regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
+ regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
+ regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
+
+ regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
+ regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
+ regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
+ regs[CTX_SECOND_BB_HEAD_U] =
+ i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
+ regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
+ regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
+
+ regs[CTX_CTX_TIMESTAMP] =
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
+ regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
+ regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
+ regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
+ regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
+ regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
+ regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
+ regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+ regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+
+ if (engine->class == RENDER_CLASS) {
+ regs[CTX_RCS_INDIRECT_CTX] =
+ i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
+ regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
+ i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
+ regs[CTX_BB_PER_CTX_PTR] =
+ i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
+
+ regs[CTX_R_PWR_CLK_STATE] =
+ i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
+ }
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ inflight = READ_ONCE(ve->context.inflight);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ struct intel_engine_cs *old = ve->siblings[0];
+
+ /* All unattached (rq->engine == old) must already be completed */
+
+ spin_lock(&old->breadcrumbs.irq_lock);
+ if (!list_empty(&ve->context.signal_link)) {
+ list_move_tail(&ve->context.signal_link,
+ &engine->breadcrumbs.signalers);
+ intel_engine_queue_breadcrumbs(engine);
+ }
+ spin_unlock(&old->breadcrumbs.irq_lock);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -691,6 +847,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
+ for (rb = rb_first_cached(&execlists->virtual); rb; ) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (!rq) { /* lazily clean up after another engine handled rq */
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
+ if (!virtual_matches(ve, rq, engine)) {
+ rb = rb_next(rb);
+ continue;
+ }
+
+ break;
+ }
+
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -712,7 +888,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
return;
- if (need_preempt(engine, last)) {
+ if (need_preempt(engine, last, rb)) {
inject_preempt_context(engine);
return;
}
@@ -752,6 +928,93 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->tail = last->wa_tail;
}
+ while (rb) { /* XXX virtual is always taking precedence */
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.active.lock);
+
+ rq = ve->request;
+ if (unlikely(!rq)) { /* lost the race to a sibling */
+ spin_unlock(&ve->base.active.lock);
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
+ GEM_BUG_ON(rq != ve->request);
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->hw_context != &ve->context);
+
+ if (rq_prio(rq) >= queue_prio(execlists)) {
+ if (!virtual_matches(ve, rq, engine)) {
+ spin_unlock(&ve->base.active.lock);
+ rb = rb_next(rb);
+ continue;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ return; /* leave this rq for another engine */
+ }
+
+ GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ ve->request = NULL;
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ rq->engine = engine;
+
+ if (engine != ve->siblings[0]) {
+ u32 *regs = ve->context.lrc_reg_state;
+ unsigned int n;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ virtual_update_register_offsets(regs, engine);
+
+ if (!list_empty(&ve->context.signals))
+ virtual_xfer_breadcrumbs(ve, engine);
+
+ /*
+ * Move the bound engine to the top of the list
+ * for future execution. We then kick this
+ * tasklet first before checking others, so that
+ * we preferentially reuse this set of bound
+ * registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n],
+ ve->siblings[0]);
+ break;
+ }
+ }
+
+ GEM_BUG_ON(ve->siblings[0] != engine);
+ }
+
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
+ submit = true;
+ last = rq;
+ }
+
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
+
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -805,8 +1068,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- list_del_init(&rq->sched.link);
-
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
@@ -907,7 +1168,8 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
+ GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1067,7 +1329,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
process_csb(engine);
if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
@@ -1085,18 +1347,19 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s awake?=%d, active=%x\n",
engine->name,
- !!engine->i915->gt.awake,
+ !!intel_wakeref_active(&engine->wakeref),
engine->execlists.active);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
+ GEM_BUG_ON(!list_empty(&node->link));
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
@@ -1127,7 +1390,7 @@ static void execlists_submit_request(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
@@ -1136,7 +1399,7 @@ static void execlists_submit_request(struct i915_request *request)
submit_queue(engine, rq_prio(request));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void __execlists_context_fini(struct intel_context *ce)
@@ -1159,60 +1422,11 @@ static void execlists_context_destroy(struct kref *kref)
intel_context_free(ce);
}
-static int __context_pin(struct i915_vma *vma)
-{
- unsigned int flags;
- int err;
-
- flags = PIN_GLOBAL | PIN_HIGH;
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- return err;
-
- vma->obj->pin_global++;
- vma->obj->mm.dirty = true;
-
- return 0;
-}
-
-static void __context_unpin(struct i915_vma *vma)
-{
- vma->obj->pin_global--;
- __i915_vma_unpin(vma);
-}
-
static void execlists_context_unpin(struct intel_context *ce)
{
- struct intel_engine_cs *engine;
-
- /*
- * The tasklet may still be using a pointer to our state, via an
- * old request. However, since we know we only unpin the context
- * on retirement of the following request, we know that the last
- * request referencing us will have had a completion CS interrupt.
- * If we see that it is still active, it means that the tasklet hasn't
- * had the chance to run yet; let it run before we teardown the
- * reference it may use.
- */
- engine = READ_ONCE(ce->active);
- if (unlikely(engine)) {
- unsigned long flags;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- process_csb(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- GEM_BUG_ON(READ_ONCE(ce->active));
- }
-
i915_gem_context_unpin_hw_id(ce->gem_context);
-
- intel_ring_unpin(ce->ring);
-
i915_gem_object_unpin_map(ce->state->obj);
- __context_unpin(ce->state);
+ intel_ring_unpin(ce->ring);
}
static void
@@ -1232,7 +1446,7 @@ __execlists_update_reg_state(struct intel_context *ce,
/* RPCS */
if (engine->class == RENDER_CLASS)
regs[CTX_R_PWR_CLK_STATE + 1] =
- gen8_make_rpcs(engine->i915, &ce->sseu);
+ intel_sseu_make_rpcs(engine->i915, &ce->sseu);
}
static int
@@ -1242,14 +1456,17 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
- GEM_BUG_ON(!ce->gem_context->ppgtt);
+ GEM_BUG_ON(!ce->gem_context->vm);
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = __context_pin(ce->state);
+ ret = intel_context_active_acquire(ce,
+ engine->i915->ggtt.pin_bias |
+ PIN_OFFSET_BIAS |
+ PIN_HIGH);
if (ret)
goto err;
@@ -1258,7 +1475,7 @@ __execlists_context_pin(struct intel_context *ce,
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto unpin_vma;
+ goto unpin_active;
}
ret = intel_ring_pin(ce->ring);
@@ -1279,8 +1496,8 @@ unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
-unpin_vma:
- __context_unpin(ce->state);
+unpin_active:
+ intel_context_active_release(ce);
err:
return ret;
}
@@ -1316,6 +1533,9 @@ static const struct intel_context_ops execlists_context_ops = {
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.reset = execlists_context_reset,
.destroy = execlists_context_destroy,
};
@@ -1355,7 +1575,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
- struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+ struct i915_ppgtt * const ppgtt =
+ i915_vm_to_ppgtt(rq->gem_context->vm);
int err, i;
u32 *cs;
@@ -1428,7 +1649,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
+ if (i915_vm_is_4lvl(request->gem_context->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1655,7 +1876,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1695,8 +1916,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
- return -EINVAL;
+ if (engine->class != RENDER_CLASS)
+ return 0;
switch (INTEL_GEN(engine->i915)) {
case 11:
@@ -1755,31 +1976,30 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static void enable_execlists(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- if (INTEL_GEN(dev_priv) >= 11)
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ if (INTEL_GEN(engine->i915) >= 11)
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
else
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- I915_WRITE(RING_MI_MODE(engine->mmio_base),
- _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- i915_ggtt_offset(engine->status_page.vma));
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+ ENGINE_WRITE(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
bool unexpected = false;
- if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -1787,7 +2007,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
return unexpected;
}
-static int gen8_init_common_ring(struct intel_engine_cs *engine)
+static int execlists_resume(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
intel_engine_apply_whitelist(engine);
@@ -1820,7 +2040,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
+ * calling engine->resume() and also writing the ELSP.
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
@@ -1830,8 +2050,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/* And flush any current direct submission. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static bool lrc_regs_ok(const struct i915_request *rq)
@@ -1872,6 +2092,25 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
&execlists->csb_status[reset_value]);
}
+static struct i915_request *active_request(struct i915_request *rq)
+{
+ const struct list_head * const list = &rq->engine->active.requests;
+ const struct intel_context * const context = rq->hw_context;
+ struct i915_request *active = NULL;
+
+ list_for_each_entry_from_reverse(rq, list, sched.link) {
+ if (i915_request_completed(rq))
+ break;
+
+ if (rq->hw_context != context)
+ break;
+
+ active = rq;
+ }
+
+ return active;
+}
+
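active_request() scans backwards from the request found in ELSP to the oldest incomplete request of the same context, which is where the ring must resume after the reset. A small standalone model of that walk:

#include <stdbool.h>
#include <stddef.h>

struct req { int context; bool completed; };

static const struct req *
oldest_incomplete(const struct req *reqs, size_t start, int context)
{
        const struct req *active = NULL;
        size_t i = start + 1;

        while (i-- > 0) {       /* walk from 'start' back towards index 0 */
                if (reqs[i].completed)
                        break;
                if (reqs[i].context != context)
                        break;
                active = &reqs[i];
        }
        return active;
}

int main(void)
{
        const struct req reqs[] = {
                { .context = 1, .completed = true },
                { .context = 1, .completed = false },   /* resume here */
                { .context = 1, .completed = false },   /* found in ELSP */
        };

        return oldest_incomplete(reqs, 2, 1) == &reqs[1] ? 0 : 1;
}
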
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1892,7 +2131,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!port_isset(execlists->port))
goto out_clear;
- ce = port_request(execlists->port)->hw_context;
+ rq = port_request(execlists->port);
+ ce = rq->hw_context;
/*
* Catch up with any missed context-switch interrupts.
@@ -1905,16 +2145,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
*/
execlists_cancel_port_requests(execlists);
- /* Push back any incomplete requests for replay after the reset. */
- rq = __unwind_incomplete_requests(engine);
+ rq = active_request(rq);
if (!rq)
goto out_replay;
- if (rq->hw_context != ce) { /* caught just before a CS event */
- rq = NULL;
- goto out_replay;
- }
-
/*
* If this request hasn't started yet, e.g. it is waiting on a
* semaphore, we need to avoid skipping the request or else we
@@ -1961,13 +2195,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
}
execlists_init_reg_state(regs, ce, engine, ce->ring);
- /* Rerun the request; its payload has been neutered (if guilty). */
out_replay:
+ /* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head =
rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
intel_ring_update_space(ce->ring);
__execlists_update_reg_state(ce, engine);
+ /* Push back any incomplete requests for replay after the reset. */
+ __unwind_incomplete_requests(engine);
+
out_clear:
execlists_clear_all_active(execlists);
}
@@ -1978,11 +2215,11 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
GEM_TRACE("%s\n", engine->name);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, stalled);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void nop_submission_tasklet(unsigned long data)
@@ -2013,12 +2250,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -2041,6 +2278,26 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.active.lock);
+ if (ve->request) {
+ ve->request->engine = engine;
+ __i915_request_submit(ve->request);
+ dma_fence_set_error(&ve->request->fence, -EIO);
+ i915_request_mark_complete(ve->request);
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ ve->request = NULL;
+ }
+ spin_unlock(&ve->base.active.lock);
+ }
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority_hint = INT_MIN;
@@ -2050,7 +2307,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -2270,12 +2527,6 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
request->timeline->hwsp_offset,
0);
- cs = gen8_emit_ggtt_write(cs,
- intel_engine_next_hangcheck_seqno(request->engine),
- I915_GEM_HWS_HANGCHECK_ADDR,
- MI_FLUSH_DW_STORE_INDEX);
-
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2287,19 +2538,17 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
request->timeline->hwsp_offset,
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- cs = gen8_emit_ggtt_write_rcs(cs,
- intel_engine_next_hangcheck_seqno(request->engine),
- I915_GEM_HWS_HANGCHECK_ADDR,
- PIPE_CONTROL_STORE_DATA_INDEX);
+ PIPE_CONTROL_DC_FLUSH_ENABLE);
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL,
+ 0);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2329,38 +2578,9 @@ static int gen8_init_rcs_context(struct i915_request *rq)
return i915_gem_render_state_emit(rq);
}
-/**
- * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
- * @engine: Engine Command Streamer.
- */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
+static void execlists_park(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- /*
- * Tasklet cannot be active at this point due intel_mark_active/idle
- * so this is just for documentation.
- */
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)))
- tasklet_kill(&engine->execlists.tasklet);
-
- dev_priv = engine->i915;
-
- if (engine->buffer) {
- WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
- }
-
- if (engine->cleanup)
- engine->cleanup(engine);
-
- intel_engine_cleanup_common(engine);
-
- lrc_destroy_wa_ctx(engine);
-
- engine->i915 = NULL;
- dev_priv->engine[engine->id] = NULL;
- kfree(engine);
+ intel_engine_park(engine);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2374,7 +2594,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
- engine->park = NULL;
+ engine->park = execlists_park;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
@@ -2385,11 +2605,20 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
+static void execlists_destroy(struct intel_engine_cs *engine)
+{
+ intel_engine_cleanup_common(engine);
+ lrc_destroy_wa_ctx(engine);
+ kfree(engine);
+}
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
- engine->init_hw = gen8_init_common_ring;
+
+ engine->destroy = execlists_destroy;
+ engine->resume = execlists_resume;
engine->reset.prepare = execlists_reset_prepare;
engine->reset.reset = execlists_reset;
@@ -2442,15 +2671,8 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static int
-logical_ring_setup(struct intel_engine_cs *engine)
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
- int err;
-
- err = intel_engine_setup_common(engine);
- if (err)
- return err;
-
/* Intentionally left blank. */
engine->buffer = NULL;
@@ -2460,13 +2682,20 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+ if (engine->class == RENDER_CLASS) {
+ engine->init_context = gen8_init_rcs_context;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ }
+
return 0;
}
-static int logical_ring_init(struct intel_engine_cs *engine)
+int intel_execlists_submission_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
int ret;
@@ -2475,14 +2704,23 @@ static int logical_ring_init(struct intel_engine_cs *engine)
return ret;
intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+
+ if (intel_init_workaround_bb(engine))
+ /*
+ * We continue even if we fail to initialize the WA batch
+ * because we only expect rare glitches but nothing critical
+ * enough to prevent us from using the GPU.
+ */
+ DRM_ERROR("WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->uncore.regs +
+ execlists->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = i915->uncore.regs +
+ execlists->ctrl_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = i915->uncore.regs +
+ execlists->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_ELSP(base));
}
@@ -2497,7 +2735,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
- if (INTEL_GEN(engine->i915) < 11)
+ if (INTEL_GEN(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
else
execlists->csb_size = GEN11_CSB_ENTRIES;
@@ -2507,182 +2745,6 @@ static int logical_ring_init(struct intel_engine_cs *engine)
return 0;
}
-int logical_render_ring_init(struct intel_engine_cs *engine)
-{
- int ret;
-
- ret = logical_ring_setup(engine);
- if (ret)
- return ret;
-
- /* Override some for render ring. */
- engine->init_context = gen8_init_rcs_context;
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
-
- ret = logical_ring_init(engine);
- if (ret)
- return ret;
-
- ret = intel_init_workaround_bb(engine);
- if (ret) {
- /*
- * We continue even if we fail to initialize WA batch
- * because we only expect rare glitches but nothing
- * critical to prevent us from using GPU
- */
- DRM_ERROR("WA batch buffer initialization failed: %d\n",
- ret);
- }
-
- intel_engine_init_whitelist(engine);
-
- return 0;
-}
-
-int logical_xcs_ring_init(struct intel_engine_cs *engine)
-{
- int err;
-
- err = logical_ring_setup(engine);
- if (err)
- return err;
-
- return logical_ring_init(engine);
-}
-
-u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- bool subslice_pg = sseu->has_subslice_pg;
- struct intel_sseu ctx_sseu;
- u8 slices, subslices;
- u32 rpcs = 0;
-
- /*
- * No explicit RPCS request is needed to ensure full
- * slice/subslice/EU enablement prior to Gen9.
- */
- if (INTEL_GEN(i915) < 9)
- return 0;
-
- /*
- * If i915/perf is active, we want a stable powergating configuration
- * on the system.
- *
- * We could choose full enablement, but on ICL we know there are use
- * cases which disable slices for functional, apart for performance
- * reasons. So in this case we select a known stable subset.
- */
- if (!i915->perf.oa.exclusive_stream) {
- ctx_sseu = *req_sseu;
- } else {
- ctx_sseu = intel_device_default_sseu(i915);
-
- if (IS_GEN(i915, 11)) {
- /*
- * We only need subslice count so it doesn't matter
- * which ones we select - just turn off low bits in the
- * amount of half of all available subslices per slice.
- */
- ctx_sseu.subslice_mask =
- ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
- ctx_sseu.slice_mask = 0x1;
- }
- }
-
- slices = hweight8(ctx_sseu.slice_mask);
- subslices = hweight8(ctx_sseu.subslice_mask);
-
- /*
- * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
- * wide and Icelake has up to eight subslices, specfial programming is
- * needed in order to correctly enable all subslices.
- *
- * According to documentation software must consider the configuration
- * as 2x4x8 and hardware will translate this to 1x8x8.
- *
- * Furthemore, even though SScount is three bits, maximum documented
- * value for it is four. From this some rules/restrictions follow:
- *
- * 1.
- * If enabled subslice count is greater than four, two whole slices must
- * be enabled instead.
- *
- * 2.
- * When more than one slice is enabled, hardware ignores the subslice
- * count altogether.
- *
- * From these restrictions it follows that it is not possible to enable
- * a count of subslices between the SScount maximum of four restriction,
- * and the maximum available number on a particular SKU. Either all
- * subslices are enabled, or a count between one and four on the first
- * slice.
- */
- if (IS_GEN(i915, 11) &&
- slices == 1 &&
- subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
- GEM_BUG_ON(subslices & 1);
-
- subslice_pg = false;
- slices *= 2;
- }
-
- /*
- * Starting in Gen9, render power gating can leave
- * slice/subslice/EU in a partially enabled state. We
- * must make an explicit request through RPCS for full
- * enablement.
- */
- if (sseu->has_slice_pg) {
- u32 mask, val = slices;
-
- if (INTEL_GEN(i915) >= 11) {
- mask = GEN11_RPCS_S_CNT_MASK;
- val <<= GEN11_RPCS_S_CNT_SHIFT;
- } else {
- mask = GEN8_RPCS_S_CNT_MASK;
- val <<= GEN8_RPCS_S_CNT_SHIFT;
- }
-
- GEM_BUG_ON(val & ~mask);
- val &= mask;
-
- rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
- }
-
- if (subslice_pg) {
- u32 val = subslices;
-
- val <<= GEN8_RPCS_SS_CNT_SHIFT;
-
- GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
- val &= GEN8_RPCS_SS_CNT_MASK;
-
- rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
- }
-
- if (sseu->has_eu_pg) {
- u32 val;
-
- val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
- GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
- val &= GEN8_RPCS_EU_MIN_MASK;
-
- rpcs |= val;
-
- val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
- GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
- val &= GEN8_RPCS_EU_MAX_MASK;
-
- rpcs |= val;
-
- rpcs |= GEN8_RPCS_ENABLE;
- }
-
- return rpcs;
-}
-
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -2717,16 +2779,19 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
- /* A context is actually a big batch buffer with several
+ /*
+ * A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
* values we are setting here are only for the first context restore:
* on a subsequent save, the GPU will recreate this batchbuffer with new
* values (including all the missing MI_LOAD_REGISTER_IMM commands that
* we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
*/
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
@@ -2902,7 +2967,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
- ctx_obj = i915_gem_object_create(engine->i915, context_size);
+ ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
@@ -2945,6 +3010,468 @@ error_deref_obj:
return ret;
}
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+ unsigned int n;
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ GEM_BUG_ON(ve->request);
+ GEM_BUG_ON(ve->context.inflight);
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->active.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->active.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+
+ if (ve->context.state)
+ __execlists_context_fini(&ve->context);
+
+ kfree(ve->bonds);
+ kfree(ve);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+ * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (!swp)
+ return;
+
+ swap(ve->siblings[swp], ve->siblings[0]);
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
+}
+
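Because sibling[0] is the engine the dequeue path inspects first, randomising it once at pin time spreads contexts that were created in identical batches across the physical engines. A userspace sketch of the swap (the kernel uses prandom_u32_max(), which avoids the modulo bias of the plain rand() below, and then refreshes the register offsets as above):

#include <stdlib.h>

struct engine { int id; };

static void initial_hint(struct engine **siblings, unsigned int count)
{
        /* kernel: swp = prandom_u32_max(count); (no modulo bias) */
        unsigned int swp = (unsigned int)rand() % count;
        struct engine *tmp;

        if (!swp)
                return;         /* already first; nothing to do */

        tmp = siblings[0];
        siblings[0] = siblings[swp];
        siblings[swp] = tmp;
}

int main(void)
{
        struct engine e[3] = { { 0 }, { 1 }, { 2 } };
        struct engine *s[] = { &e[0], &e[1], &e[2] };

        srand(42);              /* deterministic for the example */
        initial_hint(s, 3);
        return 0;
}
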
+static int virtual_context_pin(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ int err;
+
+ /* Note: we must use a real engine class for setting up reg state */
+ err = __execlists_context_pin(ce, ve->siblings[0]);
+ if (err)
+ return err;
+
+ virtual_engine_initial_hint(ve);
+ return 0;
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .pin = virtual_context_pin,
+ .unpin = execlists_context_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_skip(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
+ ve->base.name,
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = ve->base.execlists.queue_priority_hint;
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ local_irq_disable();
+ for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ spin_lock(&sibling->active.lock);
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ spin_unlock(&sibling->active.lock);
+ }
+ continue;
+ }
+
+ spin_lock(&sibling->active.lock);
+
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint) {
+ sibling->execlists.queue_priority_hint = prio;
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+ }
+
+ spin_unlock(&sibling->active.lock);
+ }
+ local_irq_enable();
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ GEM_TRACE("%s: rq=%llx:%lld\n",
+ ve->base.name,
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ GEM_BUG_ON(ve->request);
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ WRITE_ONCE(ve->request, rq);
+
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_schedule(&ve->base.execlists.tasklet);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct ve_bond *bond;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond) {
+ intel_engine_mask_t old, new, cmp;
+
+ cmp = READ_ONCE(rq->execution_mask);
+ do {
+ old = cmp;
+ new = cmp & bond->sibling_mask;
+ } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
+ }
+}
+
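virtual_bond_execute() narrows rq->execution_mask without taking a lock: a compare-and-swap loop intersects whatever mask is current with the bond's sibling_mask. A standalone C11 model of the same loop (the kernel uses cmpxchg(); atomic_compare_exchange_weak below reloads 'old' on failure, so each retry recomputes the intersection):

#include <stdatomic.h>
#include <stdint.h>

typedef uint32_t engine_mask_t;

static void narrow_execution_mask(_Atomic engine_mask_t *execution_mask,
                                  engine_mask_t sibling_mask)
{
        engine_mask_t old = atomic_load(execution_mask);

        /* Retry until old & sibling_mask is swapped in atomically. */
        while (!atomic_compare_exchange_weak(execution_mask, &old,
                                             old & sibling_mask))
                ;       /* 'old' was reloaded; recompute on next pass */
}

int main(void)
{
        _Atomic engine_mask_t mask = 0x6;       /* may run on engines 1,2 */

        narrow_execution_mask(&mask, 0x2);      /* bond allows engine 1 */
        return atomic_load(&mask) == 0x2 ? 0 : 1;
}
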
+struct intel_context *
+intel_execlists_create_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1)
+ return intel_context_create(ctx, siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = ctx->i915;
+ ve->base.id = -1;
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, ctx, &ve->base);
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+ * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+ }
+
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
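
For reference, a hypothetical caller balancing across two video engines could look like the sketch below. make_vcs_balancer() is an invented helper, not part of this series, but the intel_execlists_create_virtual() call matches the signature added above:

/* Hypothetical helper: a context that load-balances two sibling engines. */
static struct intel_context *
make_vcs_balancer(struct i915_gem_context *ctx,
		  struct intel_engine_cs *vcs0,
		  struct intel_engine_cs *vcs1)
{
	struct intel_engine_cs *siblings[] = { vcs0, vcs1 };

	/* count == 1 short-circuits to a plain context on that engine */
	return intel_execlists_create_virtual(ctx, siblings,
					      ARRAY_SIZE(siblings));
}
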
+
+struct intel_context *
+intel_execlists_clone_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(ctx,
+ se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
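
The bond table grows by one entry per distinct master, and a repeated attach for the same master merges sibling masks instead of appending. A self-contained userspace model of that behaviour, with plain ints standing in for engine pointers and realloc() for krealloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ve_bond { int master; uint32_t sibling_mask; };

struct ve { struct ve_bond *bonds; unsigned int num_bonds; };

static struct ve_bond *find_bond(struct ve *ve, int master)
{
	for (unsigned int i = 0; i < ve->num_bonds; i++)
		if (ve->bonds[i].master == master)
			return &ve->bonds[i];
	return NULL;
}

static int attach_bond(struct ve *ve, int master, uint32_t sibling_mask)
{
	struct ve_bond *bond = find_bond(ve, master);

	if (bond) {
		bond->sibling_mask |= sibling_mask;	/* merge */
		return 0;
	}

	bond = realloc(ve->bonds, sizeof(*bond) * (ve->num_bonds + 1));
	if (!bond)
		return -1;

	bond[ve->num_bonds].master = master;
	bond[ve->num_bonds].sibling_mask = sibling_mask;
	ve->bonds = bond;
	ve->num_bonds++;
	return 0;
}

int main(void)
{
	struct ve ve = { NULL, 0 };

	attach_bond(&ve, 0, 0x2);
	attach_bond(&ve, 0, 0x4);	/* same master: masks merge */
	printf("bonds=%u mask=0x%x\n", ve.num_bonds, ve.bonds[0].sibling_mask);
	free(ve.bonds);
	return 0;
}
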
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -2958,11 +3485,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int count;
struct rb_node *rb;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
last = NULL;
count = 0;
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (count++ < max - 1)
show_request(m, rq, "\t\tE ");
else
@@ -3002,7 +3529,30 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tQ ");
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tV ");
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\tV ");
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
void intel_lr_context_reset(struct intel_engine_cs *engine,
@@ -3037,5 +3587,5 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_lrc.c"
+#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 84aa230ea27b..c2bba82bcc16 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -24,8 +24,15 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
-#include "intel_ringbuffer.h"
-#include "i915_gem_context.h"
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct drm_i915_private;
+struct i915_gem_context;
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
/* Execlists regs */
#define RING_ELSP(base) _MMIO((base) + 0x230)
@@ -67,8 +74,9 @@ enum {
/* Logical Rings */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-int logical_render_ring_init(struct intel_engine_cs *engine);
-int logical_xcs_ring_init(struct intel_engine_cs *engine);
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
@@ -96,11 +104,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
-struct drm_printer;
-
-struct drm_i915_private;
-struct i915_gem_context;
-
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
void intel_lr_context_reset(struct intel_engine_cs *engine,
@@ -115,6 +118,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
const char *prefix),
unsigned int max);
-u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
+struct intel_context *
+intel_execlists_create_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 5ef932d810a7..6bf34738b4e5 100644
--- a/drivers/gpu/drm/i915/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -55,7 +55,7 @@
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
- const u64 addr__ = px_dma(&ppgtt->pml4); \
+ const u64 addr__ = px_dma(ppgtt->pd); \
(reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
} while (0)
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 274ba78500c0..1f9db50b1869 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -20,9 +20,11 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+
+#include "intel_engine.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
-#include "intel_ringbuffer.h"
/* structures required */
struct drm_i915_mocs_entry {
@@ -198,6 +200,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 3d99d1271b2b..0913704a1af2 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,7 +49,9 @@
* context handling keep the MOCS in step.
*/
-#include "i915_drv.h"
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 677d59304e78..4c478b38e420 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,9 +7,16 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include "display/intel_overlay.h"
+
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gpu_error.h"
-#include "i915_reset.h"
+#include "i915_irq.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+#include "intel_reset.h"
#include "intel_guc.h"
@@ -43,12 +50,12 @@ static void engine_skip_context(struct i915_request *rq)
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (!i915_request_is_active(rq))
return;
- list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
}
@@ -124,7 +131,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
rq->fence.seqno,
yesno(guilty));
- lockdep_assert_held(&rq->engine->timeline.lock);
+ lockdep_assert_held(&rq->engine->active.lock);
GEM_BUG_ON(i915_request_completed(rq));
if (guilty) {
@@ -641,9 +648,6 @@ int intel_gpu_reset(struct drm_i915_private *i915,
bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
- if (USES_GUC(i915))
- return false;
-
if (!i915_modparams.reset)
return false;
@@ -683,6 +687,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
+ intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -691,19 +696,19 @@ static void revoke_mmaps(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->num_fence_regs; i++) {
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->fence_regs[i].vma);
+ vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(i915->drm.anon_inode->i_mapping,
@@ -718,6 +723,7 @@ static void reset_prepare(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_gt_pm_get(i915);
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
@@ -755,48 +761,10 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
+ intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
-struct i915_gpu_restart {
- struct work_struct work;
- struct drm_i915_private *i915;
-};
-
-static void restart_work(struct work_struct *work)
-{
- struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
- struct drm_i915_private *i915 = arg->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(i915);
- mutex_lock(&i915->drm.struct_mutex);
- WRITE_ONCE(i915->gpu_error.restart, NULL);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
-
- /*
- * Ostensibily, we always want a context loaded for powersaving,
- * so if the engine is idle after the reset, send a request
- * to load our scratch kernel_context.
- */
- if (!intel_engine_is_idle(engine))
- continue;
-
- rq = i915_request_alloc(engine, i915->kernel_context);
- if (!IS_ERR(rq))
- i915_request_add(rq);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
-
- kfree(arg);
-}
-
static void reset_finish(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -806,29 +774,7 @@ static void reset_finish(struct drm_i915_private *i915)
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
-}
-
-static void reset_restart(struct drm_i915_private *i915)
-{
- struct i915_gpu_restart *arg;
-
- /*
- * Following the reset, ensure that we always reload context for
- * powersaving, and to correct engine->last_retired_context. Since
- * this requires us to submit a request, queue a worker to do that
- * task for us to evade any locking here.
- */
- if (READ_ONCE(i915->gpu_error.restart))
- return;
-
- arg = kmalloc(sizeof(*arg), GFP_KERNEL);
- if (arg) {
- arg->i915 = i915;
- INIT_WORK(&arg->work, restart_work);
-
- WRITE_ONCE(i915->gpu_error.restart, arg);
- queue_work(i915->wq, &arg->work);
- }
+ intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -840,10 +786,10 @@ static void nop_submit_request(struct i915_request *request)
engine->name, request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
@@ -889,6 +835,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
+ set_bit(I915_WEDGED, &error->flags);
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
@@ -896,9 +843,6 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
reset_finish(i915);
- smp_mb__before_atomic();
- set_bit(I915_WEDGED, &error->flags);
-
GEM_TRACE("end\n");
}
@@ -908,7 +852,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
__i915_gem_set_wedged(i915);
mutex_unlock(&error->wedge_mutex);
}
@@ -956,7 +900,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
}
mutex_unlock(&i915->gt.timelines.mutex);
- intel_engines_sanitize(i915, false);
+ intel_gt_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -1034,12 +978,12 @@ void i915_reset(struct drm_i915_private *i915,
GEM_TRACE("flags=%lx\n", error->flags);
might_sleep();
- assert_rpm_wakelock_held(i915);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
+ mutex_lock(&error->wedge_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!__i915_gem_unset_wedged(i915))
- return;
+ goto unlock;
if (reason)
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
@@ -1087,8 +1031,8 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
- if (!__i915_wedged(error))
- reset_restart(i915);
+unlock:
+ mutex_unlock(&error->wedge_mutex);
return;
taint:
@@ -1104,7 +1048,7 @@ taint:
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
- add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ add_taint_for_CI(TAINT_WARN);
error:
__i915_gem_set_wedged(i915);
goto finish;
@@ -1137,6 +1081,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ if (!intel_wakeref_active(&engine->wakeref))
+ return 0;
+
reset_prepare_engine(engine);
if (msg)
@@ -1168,7 +1115,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
- ret = engine->init_hw(engine);
+ ret = engine->resume(engine);
if (ret)
goto out;
@@ -1201,9 +1148,7 @@ static void i915_reset_device(struct drm_i915_private *i915,
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&error->reset_backoff_srcu);
- mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
- mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
@@ -1217,7 +1162,14 @@ static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
intel_uncore_rmw(uncore, reg, 0, 0);
}
-void i915_clear_error_registers(struct drm_i915_private *i915)
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+static void clear_error_registers(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask)
{
struct intel_uncore *uncore = &i915->uncore;
u32 eir;
@@ -1250,15 +1202,74 @@ void i915_clear_error_registers(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
- rmw_clear(uncore,
- RING_FAULT_REG(engine), RING_FAULT_VALID);
- intel_uncore_posting_read(uncore,
- RING_FAULT_REG(engine));
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen8_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, dev_priv, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
+static void gen8_check_faults(struct drm_i915_private *dev_priv)
+{
+ u32 fault = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
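
gen8_check_faults() reassembles the faulting virtual address from two registers: FAULT_TLB_DATA0 supplies VA bits 43:12 and the low bits of FAULT_TLB_DATA1 supply bits 47:44. A worked userspace example of the same shifts (the register values, and the width of FAULT_VA_HIGH_BITS, are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define FAULT_VA_HIGH_BITS	0xf	/* assumed: VA[47:44] in DATA1 */

static uint64_t fault_va(uint32_t data0, uint32_t data1)
{
	return ((uint64_t)(data1 & FAULT_VA_HIGH_BITS) << 44) |
	       ((uint64_t)data0 << 12);
}

int main(void)
{
	/* data0 = VA[43:12], data1 low nibble = VA[47:44] */
	printf("va = 0x%012llx\n",
	       (unsigned long long)fault_va(0x00abcdef, 0x3));
	/* prints va = 0x300abcdef000 */
	return 0;
}
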
+
+void i915_check_and_clear_faults(struct drm_i915_private *i915)
+{
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(i915) >= 8)
+ gen8_check_faults(i915);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_check_faults(i915);
+ else
+ return;
+
+ clear_error_registers(i915, ALL_ENGINES);
+}
+
/**
* i915_handle_error - handle a gpu error
* @i915: i915 device private
@@ -1301,13 +1312,13 @@ void i915_handle_error(struct drm_i915_private *i915,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
- i915_clear_error_registers(i915);
+ clear_error_registers(i915, engine_mask);
}
/*
@@ -1364,7 +1375,7 @@ void i915_handle_error(struct drm_i915_private *i915,
wake_up_all(&error->reset_queue);
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
int i915_reset_trylock(struct drm_i915_private *i915)
@@ -1425,25 +1436,6 @@ int i915_terminally_wedged(struct drm_i915_private *i915)
return __i915_wedged(error) ? -EIO : 0;
}
-bool i915_reset_flush(struct drm_i915_private *i915)
-{
- int err;
-
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-
- flush_workqueue(i915->wq);
- GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
-
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- MAX_SCHEDULE_TIMEOUT);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return !err;
-}
-
static void i915_wedge_me(struct work_struct *work)
{
struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
@@ -1472,3 +1464,7 @@ void __i915_fini_wedge(struct i915_wedge_me *w)
destroy_delayed_work_on_stack(&w->work);
w->i915 = NULL;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_reset.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 3c0450289b8f..580ebdb59eca 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/srcu.h>
-#include "intel_engine_types.h"
+#include "gt/intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
@@ -25,7 +25,7 @@ void i915_handle_error(struct drm_i915_private *i915,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
-void i915_clear_error_registers(struct drm_i915_private *i915);
+void i915_check_and_clear_faults(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
intel_engine_mask_t stalled_mask,
@@ -34,7 +34,6 @@ int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
void i915_reset_request(struct i915_request *rq, bool guilty);
-bool i915_reset_flush(struct drm_i915_private *i915);
int __must_check i915_reset_trylock(struct drm_i915_private *i915);
void i915_reset_unlock(struct drm_i915_private *i915, int tag);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index f0d45ccc1aac..c6023bc9452d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -31,11 +31,13 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gem_render_state.h"
-#include "i915_reset.h"
#include "i915_trace.h"
-#include "intel_drv.h"
+#include "intel_context.h"
+#include "intel_reset.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -310,11 +312,6 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -416,13 +413,6 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset;
*cs++ = rq->fence.seqno;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_STORE_DATA_INDEX |
- PIPE_CONTROL_GLOBAL_GTT_IVB);
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -441,12 +431,7 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -466,10 +451,6 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
@@ -481,6 +462,7 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -638,12 +620,15 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_engine_cs *engine)
+static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->buffer;
int ret = 0;
+ GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
+ engine->name, ring->head, ring->tail);
+
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
@@ -745,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
- struct i915_timeline *tl = &engine->timeline;
struct i915_request *pos, *rq;
unsigned long flags;
u32 head;
rq = NULL;
- spin_lock_irqsave(&tl->lock, flags);
- list_for_each_entry(pos, &tl->requests, link) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ list_for_each_entry(pos, &engine->active.requests, sched.link) {
if (!i915_request_completed(pos)) {
rq = pos;
break;
@@ -806,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
engine->buffer->head = intel_ring_wrap(engine->buffer, head);
- spin_unlock_irqrestore(&tl->lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
@@ -828,12 +812,23 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
return 0;
}
-static int init_render_ring(struct intel_engine_cs *engine)
+static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- int ret = init_ring_common(engine);
- if (ret)
- return ret;
+
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+ * image. Once it is loaded, it is executed and the stored
+ * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ if (IS_GEN(dev_priv, 4))
+ I915_WRITE(ECOSKPD,
+ _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN_RANGE(dev_priv, 4, 6))
@@ -873,10 +868,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (INTEL_GEN(dev_priv) >= 6)
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-
- return 0;
+ return xcs_resume(engine);
}
static void cancel_requests(struct intel_engine_cs *engine)
@@ -884,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->timeline.requests, link) {
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
@@ -896,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
@@ -918,11 +910,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -940,10 +929,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
@@ -952,7 +937,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
}
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -991,20 +975,20 @@ i9xx_irq_disable(struct intel_engine_cs *engine)
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
- POSTING_READ16(RING_IMR(engine->mmio_base));
+ i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+ ENGINE_POSTING_READ16(engine, RING_IMR);
}
static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
+ i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}
static int
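
The i8xx helpers follow the usual cached-IMR pattern: clear an engine's bit in the shadow irq_mask to unmask (enable) its interrupt, set it to mask, then write the shadow through GEN2_IMR. The mask arithmetic alone, modelled in userspace with a plain variable in place of the register write:

#include <stdint.h>
#include <stdio.h>

static uint32_t irq_mask = 0xffffffff;	/* everything masked at init */

static void irq_enable(uint32_t enable_mask)
{
	irq_mask &= ~enable_mask;	/* unmask: clear the bit */
}

static void irq_disable(uint32_t enable_mask)
{
	irq_mask |= enable_mask;	/* mask: set the bit */
}

int main(void)
{
	irq_enable(1u << 1);	/* e.g. a user interrupt bit */
	printf("IMR after enable:  0x%08x\n", irq_mask);
	irq_disable(1u << 1);
	printf("IMR after disable: 0x%08x\n", irq_mask);
	return 0;
}
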
@@ -1282,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
- GEM_BUG_ON(timeline == &engine->timeline);
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -1317,10 +1299,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
void intel_ring_free(struct kref *ref)
{
struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
- struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
- __i915_gem_object_release_unless_active(obj);
+ i915_vma_put(ring->vma);
i915_timeline_put(ring->timeline);
kfree(ring);
@@ -1346,64 +1327,28 @@ static void ring_context_destroy(struct kref *ref)
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int err = 0;
- ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
- if (ppgtt)
- err = gen6_ppgtt_pin(ppgtt);
+ vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ if (vm)
+ err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
return err;
}
static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
- ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
- if (ppgtt)
- gen6_ppgtt_unpin(ppgtt);
-}
-
-static int __context_pin(struct intel_context *ce)
-{
- struct i915_vma *vma;
- int err;
-
- vma = ce->state;
- if (!vma)
- return 0;
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (err)
- return err;
-
- /*
- * And mark is as a globally pinned object to let the shrinker know
- * it cannot reclaim the object until we release it.
- */
- vma->obj->pin_global++;
- vma->obj->mm.dirty = true;
-
- return 0;
-}
-
-static void __context_unpin(struct intel_context *ce)
-{
- struct i915_vma *vma;
-
- vma = ce->state;
- if (!vma)
- return;
-
- vma->obj->pin_global--;
- i915_vma_unpin(vma);
+ vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ if (vm)
+ gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce->gem_context);
- __context_unpin(ce);
}
static struct i915_vma *
@@ -1414,7 +1359,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(i915, engine->context_size);
+ obj = i915_gem_object_create_shmem(i915, engine->context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1493,18 +1438,18 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
- err = __context_pin(ce);
+ err = intel_context_active_acquire(ce, PIN_HIGH);
if (err)
return err;
err = __context_pin_ppgtt(ce->gem_context);
if (err)
- goto err_unpin;
+ goto err_active;
return 0;
-err_unpin:
- __context_unpin(ce);
+err_active:
+ intel_context_active_release(ce);
return err;
}
@@ -1517,79 +1462,14 @@ static const struct intel_context_ops ring_context_ops = {
.pin = ring_context_pin,
.unpin = ring_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.reset = ring_context_reset,
.destroy = ring_context_destroy,
};
-static int intel_init_ring_buffer(struct intel_engine_cs *engine)
-{
- struct i915_timeline *timeline;
- struct intel_ring *ring;
- int err;
-
- err = intel_engine_setup_common(engine);
- if (err)
- return err;
-
- timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
- if (IS_ERR(timeline)) {
- err = PTR_ERR(timeline);
- goto err;
- }
- GEM_BUG_ON(timeline->has_initial_breadcrumb);
-
- ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- i915_timeline_put(timeline);
- if (IS_ERR(ring)) {
- err = PTR_ERR(ring);
- goto err;
- }
-
- err = intel_ring_pin(ring);
- if (err)
- goto err_ring;
-
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
-
- err = intel_engine_init_common(engine);
- if (err)
- goto err_unpin;
-
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
-
- return 0;
-
-err_unpin:
- intel_ring_unpin(ring);
-err_ring:
- intel_ring_put(ring);
-err:
- intel_engine_cleanup_common(engine);
- return err;
-}
-
-void intel_engine_cleanup(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
-
- if (engine->cleanup)
- engine->cleanup(engine);
-
- intel_engine_cleanup_common(engine);
-
- dev_priv->engine[engine->id] = NULL;
- kfree(engine);
-}
-
-static int load_pd_dir(struct i915_request *rq,
- const struct i915_hw_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
@@ -1604,7 +1484,7 @@ static int load_pd_dir(struct i915_request *rq,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+ *cs++ = ppgtt->pd->base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1646,11 +1526,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* These flags are for resource streamer on HSW+ */
flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
else
+ /* We need to save the extended state for powersaving modes */
flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
+ else if (IS_GEN(i915, 5))
+ len += 2;
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
@@ -1679,6 +1562,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
+ } else if (IS_GEN(i915, 5)) {
+ /*
+ * This w/a is only listed for pre-production ilk a/b steppings,
+ * but is also mentioned for programming the powerctx. To be
+ * safe, just apply the workaround; we do not use SyncFlush so
+ * this should never take effect and so be a no-op!
+ */
+ *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
}
if (force_restore) {
@@ -1732,6 +1623,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ } else if (IS_GEN(i915, 5)) {
+ *cs++ = MI_SUSPEND_FLUSH;
}
intel_ring_advance(rq, cs);
@@ -1771,15 +1664,16 @@ static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *ctx = rq->gem_context;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ struct i915_address_space *vm =
+ ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
unsigned int unwind_mm = 0;
u32 hw_flags = 0;
int ret, i;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (ppgtt) {
+ if (vm) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
int loops;
/*
@@ -1826,7 +1720,7 @@ static int switch_context(struct i915_request *rq)
goto err_mm;
}
- if (ppgtt) {
+ if (vm) {
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
goto err_mm;
@@ -1869,7 +1763,7 @@ static int switch_context(struct i915_request *rq)
err_mm:
if (unwind_mm)
- ppgtt->pd_dirty_engines |= unwind_mm;
+ i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
err:
return ret;
}
@@ -1906,8 +1800,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
struct i915_request *target;
long timeout;
- lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
-
if (intel_ring_update_space(ring) >= bytes)
return 0;
@@ -1923,7 +1815,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
return -ENOSPC;
timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
@@ -2167,24 +2059,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
-static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(dev_priv) >= 6) {
- engine->irq_enable = gen6_irq_enable;
- engine->irq_disable = gen6_irq_disable;
- } else if (INTEL_GEN(dev_priv) >= 5) {
- engine->irq_enable = gen5_irq_enable;
- engine->irq_disable = gen5_irq_disable;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- engine->irq_enable = i9xx_irq_enable;
- engine->irq_disable = i9xx_irq_disable;
- } else {
- engine->irq_enable = i8xx_irq_enable;
- engine->irq_disable = i8xx_irq_disable;
- }
-}
-
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
@@ -2200,15 +2074,51 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
-static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void ring_destroy(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ intel_ring_unpin(engine->buffer);
+ intel_ring_put(engine->buffer);
+
+ intel_engine_cleanup_common(engine);
+ kfree(engine);
+}
+
+static void setup_irq(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (INTEL_GEN(i915) >= 6) {
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+ } else if (INTEL_GEN(i915) >= 5) {
+ engine->irq_enable = gen5_irq_enable;
+ engine->irq_disable = gen5_irq_disable;
+ } else if (INTEL_GEN(i915) >= 3) {
+ engine->irq_enable = i9xx_irq_enable;
+ engine->irq_disable = i9xx_irq_disable;
+ } else {
+ engine->irq_enable = i8xx_irq_enable;
+ engine->irq_disable = i8xx_irq_disable;
+ }
+}
+
+static void setup_common(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
/* gen8+ are only supported with execlists */
- GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
+ GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+
+ setup_irq(engine);
- intel_ring_init_irq(dev_priv, engine);
+ engine->destroy = ring_destroy;
- engine->init_hw = init_ring_common;
+ engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
@@ -2222,117 +2132,96 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
- if (IS_GEN(dev_priv, 5))
+ if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
- if (INTEL_GEN(dev_priv) >= 6)
+ if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (INTEL_GEN(i915) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
- else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
}
-int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
+static void setup_rcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
- if (HAS_L3_DPF(dev_priv))
+ if (HAS_L3_DPF(i915))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (INTEL_GEN(i915) >= 7) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_GEN(i915, 6)) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
- } else if (IS_GEN(dev_priv, 5)) {
+ } else if (IS_GEN(i915, 5)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(i915) < 4)
engine->emit_flush = gen2_render_ring_flush;
else
engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- if (IS_HASWELL(dev_priv))
+ if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
- engine->init_hw = init_render_ring;
-
- ret = intel_init_ring_buffer(engine);
- if (ret)
- return ret;
-
- return 0;
+ engine->resume = rcs_resume;
}
-int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
+static void setup_vcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
} else {
engine->emit_flush = bsd_ring_flush;
- if (IS_GEN(dev_priv, 5))
+ if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
-
- return intel_init_ring_buffer(engine);
}
-int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
+static void setup_bcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
-
- return intel_init_ring_buffer(engine);
}
-int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
+static void setup_vecs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+ struct drm_i915_private *i915 = engine->i915;
- intel_ring_default_vfuncs(dev_priv, engine);
+ GEM_BUG_ON(INTEL_GEN(i915) < 7);
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2340,6 +2229,73 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
engine->irq_disable = hsw_vebox_irq_disable;
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+}
+
+int intel_ring_submission_setup(struct intel_engine_cs *engine)
+{
+ setup_common(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ setup_rcs(engine);
+ break;
+ case VIDEO_DECODE_CLASS:
+ setup_vcs(engine);
+ break;
+ case COPY_ENGINE_CLASS:
+ setup_bcs(engine);
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ setup_vecs(engine);
+ break;
+ default:
+ MISSING_CASE(engine->class);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_init(struct intel_engine_cs *engine)
+{
+ struct i915_timeline *timeline;
+ struct intel_ring *ring;
+ int err;
+
+ timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+ if (IS_ERR(timeline)) {
+ err = PTR_ERR(timeline);
+ goto err;
+ }
+ GEM_BUG_ON(timeline->has_initial_breadcrumb);
- return intel_init_ring_buffer(engine);
+ ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+ i915_timeline_put(timeline);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err;
+ }
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_ring;
+
+ GEM_BUG_ON(engine->buffer);
+ engine->buffer = ring;
+
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_unpin;
+
+ GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+
+ return 0;
+
+err_unpin:
+ intel_ring_unpin(ring);
+err_ring:
+ intel_ring_put(ring);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
new file mode 100644
index 000000000000..a0756f006f5f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -0,0 +1,159 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_lrc_reg.h"
+#include "intel_sseu.h"
+
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ unsigned int i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+ total += hweight8(sseu->subslice_mask[i]);
+
+ return total;
+}
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+{
+ return hweight8(sseu->subslice_mask[slice]);
+}
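
Both helpers are population counts over the per-slice subslice masks. The same arithmetic in userspace, with __builtin_popcount() standing in for hweight8() and an invented example topology:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLICES 6

static unsigned int subslice_total(const uint8_t mask[MAX_SLICES])
{
	unsigned int i, total = 0;

	for (i = 0; i < MAX_SLICES; i++)
		total += __builtin_popcount(mask[i]);

	return total;
}

int main(void)
{
	/* e.g. a 2x4 part: four subslices on each of two slices */
	const uint8_t mask[MAX_SLICES] = { 0x0f, 0x0f };

	printf("subslices: %u\n", subslice_total(mask));	/* 8 */
	return 0;
}
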
+
+u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+ const struct intel_sseu *req_sseu)
+{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ bool subslice_pg = sseu->has_subslice_pg;
+ struct intel_sseu ctx_sseu;
+ u8 slices, subslices;
+ u32 rpcs = 0;
+
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (INTEL_GEN(i915) < 9)
+ return 0;
+
+ /*
+ * If i915/perf is active, we want a stable powergating configuration
+ * on the system.
+ *
+ * We could choose full enablement, but on ICL we know there are use
+ * cases which disable slices for functional reasons, not just for
+ * performance. So in this case we select a known stable subset.
+ */
+ if (!i915->perf.oa.exclusive_stream) {
+ ctx_sseu = *req_sseu;
+ } else {
+ ctx_sseu = intel_sseu_from_device_info(sseu);
+
+ if (IS_GEN(i915, 11)) {
+ /*
+ * We only need subslice count so it doesn't matter
+ * which ones we select - just keep the low half of all
+ * available subslices per slice enabled.
+ */
+ ctx_sseu.subslice_mask =
+ ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
+ ctx_sseu.slice_mask = 0x1;
+ }
+ }
+
+ slices = hweight8(ctx_sseu.slice_mask);
+ subslices = hweight8(ctx_sseu.subslice_mask);
+
+ /*
+ * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
+ * wide and Icelake has up to eight subslices, special programming is
+ * needed in order to correctly enable all subslices.
+ *
+ * According to documentation software must consider the configuration
+ * as 2x4x8 and hardware will translate this to 1x8x8.
+ *
+ * Furthermore, even though SScount is three bits, the maximum documented
+ * value for it is four. From this, some rules/restrictions follow:
+ *
+ * 1.
+ * If enabled subslice count is greater than four, two whole slices must
+ * be enabled instead.
+ *
+ * 2.
+ * When more than one slice is enabled, hardware ignores the subslice
+ * count altogether.
+ *
+ * From these restrictions it follows that it is not possible to enable
+ * a subslice count between four (the SScount maximum) and the maximum
+ * number available on a particular SKU: either all subslices are
+ * enabled, or a count between one and four on the first slice.
+ */
+ if (IS_GEN(i915, 11) &&
+ slices == 1 &&
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
+ GEM_BUG_ON(subslices & 1);
+
+ subslice_pg = false;
+ slices *= 2;
+ }
+
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (sseu->has_slice_pg) {
+ u32 mask, val = slices;
+
+ if (INTEL_GEN(i915) >= 11) {
+ mask = GEN11_RPCS_S_CNT_MASK;
+ val <<= GEN11_RPCS_S_CNT_SHIFT;
+ } else {
+ mask = GEN8_RPCS_S_CNT_MASK;
+ val <<= GEN8_RPCS_S_CNT_SHIFT;
+ }
+
+ GEM_BUG_ON(val & ~mask);
+ val &= mask;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
+ }
+
+ if (subslice_pg) {
+ u32 val = subslices;
+
+ val <<= GEN8_RPCS_SS_CNT_SHIFT;
+
+ GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
+ val &= GEN8_RPCS_SS_CNT_MASK;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
+ }
+
+ if (sseu->has_eu_pg) {
+ u32 val;
+
+ val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
+ val &= GEN8_RPCS_EU_MIN_MASK;
+
+ rpcs |= val;
+
+ val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
+ val &= GEN8_RPCS_EU_MAX_MASK;
+
+ rpcs |= val;
+
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
+}
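
The slices/subslices fixup in the function above encodes the "2x4x8 means 1x8x8" rule: a request for more than four subslices on a single slice cannot be expressed in SScount, so it is reprogrammed as two slices with subslice powergating dropped. A simplified userspace model (the threshold is condensed from the min_t() expression above, so treat it as a sketch rather than the exact policy):

#include <stdbool.h>
#include <stdio.h>

struct rpcs_req {
	unsigned int slices, subslices;
	bool subslice_pg;
};

static void icl_sscount_fixup(struct rpcs_req *r)
{
	/* SScount is three bits wide but may only encode up to four */
	if (r->slices == 1 && r->subslices > 4) {
		r->subslice_pg = false;
		r->slices *= 2;	/* hw translates 2x4x8 back to 1x8x8 */
	}
}

int main(void)
{
	struct rpcs_req r = { .slices = 1, .subslices = 8,
			      .subslice_pg = true };

	icl_sscount_fixup(&r);
	printf("slices=%u subslice_pg=%d\n", r.slices, r.subslice_pg);
	return 0;
}
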
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
new file mode 100644
index 000000000000..b50d0401a4e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SSEU_H__
+#define __INTEL_SSEU_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+struct drm_i915_private;
+
+#define GEN_MAX_SLICES (6) /* CNL upper bound */
+#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
+#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask[GEN_MAX_SLICES];
+ u16 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) have 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+
+ /* Topology fields */
+ u8 max_slices;
+ u8 max_subslices;
+ u8 max_eus_per_subslice;
+
+ /*
+ * We don't have more than 8 EUs per subslice at the moment and, as we
+ * store enabled EUs using bits, there is no need to multiply by EUs
+ * per subslice.
+ */
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+static inline struct intel_sseu
+intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
+{
+ struct intel_sseu value = {
+ .slice_mask = sseu->slice_mask,
+ .subslice_mask = sseu->subslice_mask[0],
+ .min_eus_per_subslice = sseu->max_eus_per_subslice,
+ .max_eus_per_subslice = sseu->max_eus_per_subslice,
+ };
+
+ return value;
+}
+
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+
+u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+ const struct intel_sseu *req_sseu);
+
+#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 841b8e515f4d..15e90fd2cfdc 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_workarounds.h"
/**
@@ -122,6 +123,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
wal->wa_count++;
wa_->val |= wa->val;
wa_->mask |= wa->mask;
+ wa_->read |= wa->read;
return;
}
}
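
When _wa_add() finds an existing entry for the same register it merges instead of appending: value, mask, and (with this patch) the read-back verification mask are all OR'ed together. The merge step in isolation, with invented register values:

#include <stdint.h>
#include <stdio.h>

struct wa { uint32_t reg, mask, val, read; };

/* Mirror of the merge branch above. */
static void wa_merge(struct wa *dst, const struct wa *src)
{
	dst->val  |= src->val;
	dst->mask |= src->mask;
	dst->read |= src->read;
}

int main(void)
{
	struct wa a = { .reg = 0x7004, .mask = 0x1, .val = 0x1, .read = 0x1 };
	struct wa b = { .reg = 0x7004, .mask = 0x4, .val = 0x4, .read = 0x4 };

	wa_merge(&a, &b);
	printf("mask=0x%x val=0x%x read=0x%x\n", a.mask, a.val, a.read);
	return 0;
}
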
@@ -146,9 +148,10 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
u32 val)
{
struct i915_wa wa = {
- .reg = reg,
+ .reg = reg,
.mask = mask,
- .val = val
+ .val = val,
+ .read = mask,
};
_wa_add(wal, &wa);
@@ -172,6 +175,19 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
+static void
+ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val,
+ /* Bonkers HW, skip verifying */
+ };
+
+ _wa_add(wal, &wa);
+}
+
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
@@ -181,10 +197,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
-static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -230,12 +245,12 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN6_WIZ_HASHING_16x4);
}
-static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- gen8_ctx_workarounds_init(engine);
+ gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -258,11 +273,10 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
(IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen8_ctx_workarounds_init(engine);
+ gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -271,10 +285,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
-static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
@@ -369,10 +383,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
-static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -409,17 +423,17 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
GEN9_IZ_HASHING(0, vals[0]));
}
-static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- gen9_ctx_workarounds_init(engine);
- skl_tune_iz_hashing(engine);
+ gen9_ctx_workarounds_init(engine, wal);
+ skl_tune_iz_hashing(engine, wal);
}
-static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -430,12 +444,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
-static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
@@ -447,22 +461,20 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
-static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
-static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -473,10 +485,10 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
-static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
@@ -513,10 +525,16 @@ static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
-static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ /* WaDisableBankHangMode:icl */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
@@ -562,33 +580,42 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+
+ /* allow headerless messages for preemptible GPGPU context */
+ WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}
-void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+static void
+__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal,
+ const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- wa_init_start(wal, "context");
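+ /* Context workarounds are recorded only for the render engine */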
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ wa_init_start(wal, name);
if (IS_GEN(i915, 11))
- icl_ctx_workarounds_init(engine);
+ icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
- cnl_ctx_workarounds_init(engine);
+ cnl_ctx_workarounds_init(engine, wal);
else if (IS_COFFEELAKE(i915))
- cfl_ctx_workarounds_init(engine);
+ cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
- glk_ctx_workarounds_init(engine);
+ glk_ctx_workarounds_init(engine, wal);
else if (IS_KABYLAKE(i915))
- kbl_ctx_workarounds_init(engine);
+ kbl_ctx_workarounds_init(engine, wal);
else if (IS_BROXTON(i915))
- bxt_ctx_workarounds_init(engine);
+ bxt_ctx_workarounds_init(engine, wal);
else if (IS_SKYLAKE(i915))
- skl_ctx_workarounds_init(engine);
+ skl_ctx_workarounds_init(engine, wal);
else if (IS_CHERRYVIEW(i915))
- chv_ctx_workarounds_init(engine);
+ chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
- bdw_ctx_workarounds_init(engine);
+ bdw_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@@ -597,6 +624,11 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
wa_init_finish(wal);
}
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+{
+ __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
+}
+
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
@@ -915,6 +947,21 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
return fw;
}
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
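+ /* Compare only the bits named in wa->read; ignore everything else */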
+ if ((cur ^ wa->val) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg),
+ cur, cur & wa->read,
+ wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
+}
+
static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
@@ -933,6 +980,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
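+ /* On debug builds, read straight back to confirm the write stuck */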
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ wa_verify(wa,
+ intel_uncore_read_fw(uncore, wa->reg),
+ wal->name, "application");
}
intel_uncore_forcewake_put__locked(uncore, fw);
@@ -944,20 +995,6 @@ void intel_gt_apply_workarounds(struct drm_i915_private *i915)
wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
-static bool
-wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
-{
- if ((cur ^ wa->val) & wa->mask) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
- name, from, i915_mmio_reg_offset(wa->reg), cur,
- cur & wa->mask, wa->val, wa->mask);
-
- return false;
- }
-
- return true;
-}
-
static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
@@ -981,7 +1018,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
}
static void
-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
struct i915_wa wa = {
.reg = reg
@@ -990,9 +1027,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
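+ /* Fold the access-mode flags into the whitelist slot entry */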
+ wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+}
+
static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
@@ -1005,56 +1049,103 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct i915_wa_list *w)
+static void skl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:skl */
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct i915_wa_list *w)
+static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
- gen9_whitelist_build(w);
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(&engine->whitelist);
}
-static void kbl_whitelist_build(struct i915_wa_list *w)
+static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:kbl */
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct i915_wa_list *w)
+static void glk_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct i915_wa_list *w)
+static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
- gen9_whitelist_build(w);
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(&engine->whitelist);
}
-static void cnl_whitelist_build(struct i915_wa_list *w)
+static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct i915_wa_list *w)
+static void icl_whitelist_build(struct intel_engine_cs *engine)
{
- /* WaAllowUMDToModifyHalfSliceChicken7:icl */
- whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
-
- /* WaAllowUMDToModifySamplerMode:icl */
- whitelist_reg(w, GEN10_SAMPLER_MODE);
+ struct i915_wa_list *w = &engine->whitelist;
- /* WaEnableStateCacheRedirectToCS:icl */
- whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ break;
+
+ case VIDEO_DECODE_CLASS:
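+ /* Expose the HuC firmware status registers to userspace, read-only */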
+ /* hucStatusRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ /* hucUKernelHdrInfoRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ /* hucStatus2RegOffset */
+ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ break;
+
+ default:
+ break;
+ }
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1062,24 +1153,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- GEM_BUG_ON(engine->id != RCS0);
-
wa_init_start(w, "whitelist");
if (IS_GEN(i915, 11))
- icl_whitelist_build(w);
+ icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
- cnl_whitelist_build(w);
+ cnl_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
- cfl_whitelist_build(w);
+ cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
- glk_whitelist_build(w);
+ glk_whitelist_build(engine);
else if (IS_KABYLAKE(i915))
- kbl_whitelist_build(w);
+ kbl_whitelist_build(engine);
else if (IS_BROXTON(i915))
- bxt_whitelist_build(w);
+ bxt_whitelist_build(engine);
else if (IS_SKYLAKE(i915))
- skl_whitelist_build(w);
+ skl_whitelist_build(engine);
else if (INTEL_GEN(i915) <= 8)
return;
else
@@ -1123,9 +1212,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1152,9 +1242,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
@@ -1260,6 +1351,128 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
+static struct i915_vma *
+create_scratch(struct i915_address_space *vm, int count)
+{
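+ /* Scratch buffer into which the GPU stores each register it reads */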
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int size;
+ int err;
+
+ size = round_up(count * sizeof(u32), PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int
+wa_list_srm(struct i915_request *rq,
+ const struct i915_wa_list *wal,
+ struct i915_vma *vma)
+{
+ const struct i915_wa *wa;
+ unsigned int i;
+ u32 srm, *cs;
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
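+ /* gen8+ MI_SRM carries a 64-bit address, so bump the command length */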
+ if (INTEL_GEN(rq->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * wal->count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int engine_wa_list_verify(struct intel_context *ce,
+ const struct i915_wa_list * const wal,
+ const char *from)
+{
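+ /*
+ * Read each workaround register back from the GPU's own point of view
+ * by emitting SRM commands in a request, then check the stored values.
+ */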
+ const struct i915_wa *wa;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ unsigned int i;
+ u32 *results;
+ int err;
+
+ if (!wal->count)
+ return 0;
+
+ vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = wa_list_srm(rq, wal, vma);
+ if (err)
+ goto err_vma;
+
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto err_vma;
+ }
+
+ err = 0;
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ if (!wa_verify(wa, results[i], wal->name, from))
+ err = -ENXIO;
+
+ i915_gem_object_unpin_map(vma->obj);
+
+err_vma:
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ return err;
+}
+
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return engine_wa_list_verify(engine->kernel_context,
+ &engine->wa_list,
+ from);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_workarounds.c"
+#include "selftest_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 34eee5ec511e..3761a6ee58bb 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -4,13 +4,17 @@
* Copyright © 2014-2018 Intel Corporation
*/
-#ifndef _I915_WORKAROUNDS_H_
-#define _I915_WORKAROUNDS_H_
+#ifndef _INTEL_WORKAROUNDS_H_
+#define _INTEL_WORKAROUNDS_H_
#include <linux/slab.h>
#include "intel_workarounds_types.h"
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
kfree(wal->list);
@@ -30,5 +34,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from);
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index 30918da180ff..42ac1fb99572 100644
--- a/drivers/gpu/drm/i915/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -12,9 +12,10 @@
#include "i915_reg.h"
struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+ u32 read;
};
struct i915_wa_list {
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 61a8206ed677..086801b51441 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -22,8 +22,14 @@
*
*/
+#include "gem/i915_gem_context.h"
+
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+
#include "mock_engine.h"
-#include "mock_request.h"
+#include "selftests/mock_request.h"
struct mock_ring {
struct intel_ring base;
@@ -140,12 +146,18 @@ static void mock_context_destroy(struct kref *ref)
static int mock_context_pin(struct intel_context *ce)
{
+ int ret;
+
if (!ce->ring) {
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
}
+ ret = intel_context_active_acquire(ce, PIN_HIGH);
+ if (ret)
+ return ret;
+
mock_timeline_pin(ce->ring->timeline);
return 0;
}
@@ -154,6 +166,9 @@ static const struct intel_context_ops mock_context_ops = {
.pin = mock_context_pin,
.unpin = mock_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.destroy = mock_context_destroy,
};
@@ -214,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -257,29 +272,39 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
- if (i915_timeline_init(i915, &engine->base.timeline, NULL))
- goto err_free;
- i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
-
- intel_engine_init_breadcrumbs(&engine->base);
-
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (pin_context(i915->kernel_context, &engine->base,
- &engine->base.kernel_context))
+ return &engine->base;
+}
+
+int mock_engine_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ int err;
+
+ intel_engine_init_active(engine, ENGINE_MOCK);
+ intel_engine_init_breadcrumbs(engine);
+ intel_engine_init_execlists(engine);
+ intel_engine_init__pm(engine);
+
+ engine->kernel_context =
+ i915_gem_context_get_engine(i915->kernel_context, engine->id);
+ if (IS_ERR(engine->kernel_context))
goto err_breadcrumbs;
- return &engine->base;
+ err = intel_context_pin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ if (err)
+ goto err_breadcrumbs;
+
+ return 0;
err_breadcrumbs:
- intel_engine_fini_breadcrumbs(&engine->base);
- i915_timeline_fini(&engine->base.timeline);
-err_free:
- kfree(engine);
- return NULL;
+ intel_engine_fini_breadcrumbs(engine);
+ return -ENOMEM;
}
void mock_engine_flush(struct intel_engine_cs *engine)
@@ -304,18 +329,12 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-
intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
- i915_timeline_fini(&engine->timeline);
kfree(engine);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h
index b9cc3a245f16..3f9b698c49d2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.h
+++ b/drivers/gpu/drm/i915/gt/mock_engine.h
@@ -29,7 +29,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
-#include "../intel_ringbuffer.h"
+#include "gt/intel_engine.h"
struct mock_engine {
struct intel_engine_cs base;
@@ -42,6 +42,8 @@ struct mock_engine {
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id);
+int mock_engine_init(struct intel_engine_cs *engine);
+
void mock_engine_flush(struct intel_engine_cs *engine);
void mock_engine_reset(struct intel_engine_cs *engine);
void mock_engine_free(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index cfaa6b296835..cfaa6b296835 100644
--- a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 050bd1e19e02..1ee4c923044f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -24,14 +24,20 @@
#include <linux/kthread.h>
-#include "../i915_selftest.h"
-#include "i915_random.h"
-#include "igt_flush_test.h"
-#include "igt_reset.h"
-#include "igt_wedge_me.h"
+#include "gem/i915_gem_context.h"
+#include "intel_engine_pm.h"
-#include "mock_context.h"
-#include "mock_drm.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_wedge_me.h"
+#include "selftests/igt_atomic.h"
+
+#include "selftests/mock_drm.h"
+
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/igt_gem_utils.h"
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
@@ -111,24 +117,18 @@ static int move_to_active(struct i915_vma *vma,
{
int err;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags);
- if (err)
- return err;
+ i915_vma_unlock(vma);
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- return 0;
+ return err;
}
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm =
- h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
@@ -173,7 +173,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
if (err)
goto unpin_vma;
- rq = i915_request_alloc(engine, h->ctx);
+ rq = igt_request_alloc(h->ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
@@ -339,8 +339,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = 0;
igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (i915_reset_failed(i915))
timeout = -EIO;
@@ -362,54 +361,6 @@ unlock:
return err;
}
-static int igt_global_reset(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- unsigned int reset_count;
- int err = 0;
-
- /* Check that we can issue a global GPU reset */
-
- igt_global_reset_lock(i915);
-
- reset_count = i915_reset_count(&i915->gpu_error);
-
- i915_reset(i915, ALL_ENGINES, NULL);
-
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
- pr_err("No GPU reset recorded!\n");
- err = -EINVAL;
- }
-
- igt_global_reset_unlock(i915);
-
- if (i915_reset_failed(i915))
- err = -EIO;
-
- return err;
-}
-
-static int igt_wedged_reset(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- intel_wakeref_t wakeref;
-
- /* Check that we can recover a wedged device with a GPU reset */
-
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
-
- i915_gem_set_wedged(i915);
-
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
-
- intel_runtime_pm_put(i915, wakeref);
- igt_global_reset_unlock(i915);
-
- return i915_reset_failed(i915) ? -EIO : 0;
-}
-
static bool wait_for_idle(struct intel_engine_cs *engine)
{
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -442,7 +393,7 @@ static int igt_reset_nop(void *arg)
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reset_count = i915_reset_count(&i915->gpu_error);
count = 0;
do {
@@ -453,7 +404,7 @@ static int igt_reset_nop(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -479,19 +430,6 @@ static int igt_reset_nop(void *arg)
break;
}
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
-
err = igt_flush_test(i915, 0);
if (err)
break;
@@ -502,7 +440,7 @@ static int igt_reset_nop(void *arg)
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
@@ -539,7 +477,7 @@ static int igt_reset_nop_engine(void *arg)
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
@@ -565,7 +503,7 @@ static int igt_reset_nop_engine(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -594,19 +532,6 @@ static int igt_reset_nop_engine(void *arg)
err = -EINVAL;
break;
}
-
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -623,7 +548,7 @@ static int igt_reset_nop_engine(void *arg)
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
if (i915_reset_failed(i915))
@@ -669,6 +594,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
+ intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
@@ -721,21 +647,9 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
err = -EINVAL;
break;
}
-
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ intel_engine_pm_put(engine);
if (err)
break;
@@ -835,7 +749,7 @@ static int active_engine(void *data)
struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
- new = i915_request_alloc(engine, ctx[idx]);
+ new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
@@ -942,6 +856,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
get_task_struct(tsk);
}
+ intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
struct i915_request *rq = NULL;
@@ -1018,6 +933,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ intel_engine_pm_put(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -1069,7 +985,9 @@ unwind:
if (err)
break;
- err = igt_flush_test(i915, 0);
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
if (err)
break;
}
@@ -1179,7 +1097,7 @@ static int igt_reset_wait(void *arg)
reset_count = fake_hangcheck(i915, ALL_ENGINES);
- timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
+ timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
timeout);
@@ -1327,7 +1245,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
}
+ i915_vma_lock(arg.vma);
err = i915_vma_move_to_active(arg.vma, rq, flags);
+ i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_vma_unpin_fence(arg.vma);
@@ -1432,8 +1352,8 @@ static int igt_reset_evict_ppgtt(void *arg)
}
err = 0;
- if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm,
+ if (ctx->vm) /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(i915, ctx->vm,
evict_vma, EXEC_OBJECT_WRITE);
out:
@@ -1681,44 +1601,8 @@ err_unlock:
return err;
}
-static void __preempt_begin(void)
-{
- preempt_disable();
-}
-
-static void __preempt_end(void)
-{
- preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
- local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
- local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
- local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
- local_irq_enable();
-}
-
-struct atomic_section {
- const char *name;
- void (*critical_section_begin)(void);
- void (*critical_section_end)(void);
-};
-
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
- const struct atomic_section *p,
+ const struct igt_atomic_section *p,
const char *mode)
{
struct tasklet_struct * const t = &engine->execlists.tasklet;
@@ -1743,7 +1627,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
}
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
- const struct atomic_section *p)
+ const struct igt_atomic_section *p)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
@@ -1781,9 +1665,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
struct igt_wedge_me w;
igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout */)
- i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1794,79 +1676,43 @@ out:
return err;
}
-static void force_reset(struct drm_i915_private *i915)
+static int igt_reset_engines_atomic(void *arg)
{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
-}
-
-static int igt_atomic_reset(void *arg)
-{
- static const struct atomic_section phases[] = {
- { "preempt", __preempt_begin, __preempt_end },
- { "softirq", __softirq_begin, __softirq_end },
- { "hardirq", __hardirq_begin, __hardirq_end },
- { }
- };
struct drm_i915_private *i915 = arg;
- intel_wakeref_t wakeref;
+ const typeof(*igt_atomic_phases) *p;
int err = 0;
- /* Check that the resets are usable from atomic context */
+ /* Check that engine resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
if (USES_GUC_SUBMISSION(i915))
- return 0; /* guc is dead; long live the guc */
+ return 0;
igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
/* Flush any requests before we get started and check basics */
- force_reset(i915);
- if (i915_reset_failed(i915))
+ if (!igt_force_reset(i915))
goto unlock;
- if (intel_has_gpu_reset(i915)) {
- const typeof(*phases) *p;
-
- for (p = phases; p->name; p++) {
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
-
- p->critical_section_begin();
- err = intel_gpu_reset(i915, ALL_ENGINES);
- p->critical_section_end();
-
- if (err) {
- pr_err("intel_gpu_reset failed under %s\n",
- p->name);
- goto out;
- }
- }
-
- force_reset(i915);
- }
-
- if (intel_has_reset_engine(i915)) {
+ for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
- const typeof(*phases) *p;
-
- for (p = phases; p->name; p++) {
- err = igt_atomic_reset_engine(engine, p);
- if (err)
- goto out;
- }
+ err = igt_atomic_reset_engine(engine, p);
+ if (err)
+ goto out;
}
}
out:
/* As we poke around the guts, do a full reset before continuing. */
- force_reset(i915);
+ igt_force_reset(i915);
unlock:
- intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
@@ -1876,21 +1722,19 @@ unlock:
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_global_reset), /* attempt to recover GPU first */
- SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_nop),
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
+ SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
SUBTEST(igt_reset_wait),
SUBTEST(igt_reset_evict_ggtt),
SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
- SUBTEST(igt_atomic_reset),
};
intel_wakeref_t wakeref;
bool saved_hangcheck;
@@ -1902,7 +1746,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
@@ -1913,7 +1757,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e8b0b5dbcb2c..401e8b539297 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -6,15 +6,18 @@
#include <linux/prime_numbers.h>
-#include "../i915_reset.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_reset.h"
-#include "../i915_selftest.h"
-#include "igt_flush_test.h"
-#include "igt_live_test.h"
-#include "igt_spinner.h"
-#include "i915_random.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
-#include "mock_context.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
static int live_sanitycheck(void *arg)
{
@@ -30,7 +33,7 @@ static int live_sanitycheck(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
@@ -71,7 +74,7 @@ err_spin:
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -94,7 +97,7 @@ static int live_busywait_preempt(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx_hi = kernel_context(i915);
if (!ctx_hi)
@@ -152,7 +155,7 @@ static int live_busywait_preempt(void *arg)
* fails, we hang instead.
*/
- lo = i915_request_alloc(engine, ctx_lo);
+ lo = igt_request_alloc(ctx_lo, engine);
if (IS_ERR(lo)) {
err = PTR_ERR(lo);
goto err_vma;
@@ -189,14 +192,14 @@ static int live_busywait_preempt(void *arg)
}
/* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
goto err_vma;
}
- hi = i915_request_alloc(engine, ctx_hi);
+ hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
goto err_vma;
@@ -217,7 +220,7 @@ static int live_busywait_preempt(void *arg)
intel_ring_advance(hi, cs);
i915_request_add(hi);
- if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
@@ -252,7 +255,7 @@ err_ctx_hi:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -274,7 +277,7 @@ static int live_preempt(void *arg)
pr_err("Logical preemption supported, but not exposed\n");
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -359,7 +362,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -379,7 +382,7 @@ static int live_late_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -463,7 +466,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -529,7 +532,7 @@ static int live_suppress_self_preempt(void *arg)
return 0; /* presume black blox */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &a))
goto err_unlock;
@@ -603,7 +606,7 @@ err_client_a:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -641,14 +644,19 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
GEM_BUG_ON(i915_request_completed(rq));
i915_sw_fence_init(&rq->submit, dummy_notify);
- i915_sw_fence_commit(&rq->submit);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
return rq;
}
static void dummy_request_free(struct i915_request *dummy)
{
+ /* We have to fake the CS interrupt to kick the next request */
+ i915_sw_fence_commit(&dummy->submit);
+
i915_request_mark_complete(dummy);
+ dma_fence_signal(&dummy->fence);
+
i915_sched_node_fini(&dummy->sched);
i915_sw_fence_fini(&dummy->submit);
@@ -675,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
goto err_unlock;
@@ -731,7 +739,6 @@ static int live_suppress_wait_preempt(void *arg)
GEM_BUG_ON(!i915_request_started(rq[0]));
if (i915_request_wait(rq[depth],
- I915_WAIT_LOCKED |
I915_WAIT_PRIORITY,
1) != -ETIME) {
pr_err("%s: Waiter depth:%d completed!\n",
@@ -768,7 +775,7 @@ err_client_0:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -799,7 +806,7 @@ static int live_chain_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &hi))
goto err_unlock;
@@ -833,7 +840,7 @@ static int live_chain_preempt(void *arg)
__func__, engine->name, ring_size);
igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
goto err_wedged;
}
@@ -861,20 +868,20 @@ static int live_chain_preempt(void *arg)
i915_request_add(rq);
for (i = 0; i < count; i++) {
- rq = i915_request_alloc(engine, lo.ctx);
+ rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
}
- rq = i915_request_alloc(engine, hi.ctx);
+ rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
engine->schedule(rq, &attr);
igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
@@ -886,11 +893,11 @@ static int live_chain_preempt(void *arg)
}
igt_spinner_end(&lo.spin);
- rq = i915_request_alloc(engine, lo.ctx);
+ rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
@@ -916,7 +923,7 @@ err_client_hi:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -945,7 +952,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -1042,7 +1049,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1082,7 +1089,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(batch, ctx->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1093,18 +1100,20 @@ static int smoke_submit(struct preempt_smoke *smoke,
ctx->sched.priority = prio;
- rq = i915_request_alloc(smoke->engine, ctx);
+ rq = igt_request_alloc(ctx, smoke->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin;
}
if (vma) {
+ i915_vma_lock(vma);
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
}
i915_request_add(rq);
@@ -1246,7 +1255,7 @@ static int live_preempt_smoke(void *arg)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(smoke.i915);
+ wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
@@ -1299,13 +1308,506 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915, wakeref);
+ intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
return err;
}
+static int nop_virtual_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
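+/* CHAIN: submit all of one context's requests before moving to the next */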
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16];
+ struct i915_gem_context *ctx[16];
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+
+ for (n = 0; n < nctx; n++) {
+ ctx[n] = kernel_context(i915);
+ if (!ctx[n]) {
+ err = -ENOMEM;
+ nctx = n;
+ goto out;
+ }
+
+ ve[n] = intel_execlists_create_virtual(ctx[n],
+ siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ kernel_context_close(ctx[n]);
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ kernel_context_close(ctx[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ request[nc] =
+ i915_request_create(ve[nc]);
+ if (IS_ERR(request[nc])) {
+ err = PTR_ERR(request[nc]);
+ goto out;
+ }
+
+ i915_request_add(request[nc]);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ request[nc] =
+ i915_request_create(ve[nc]);
+ if (IS_ERR(request[nc])) {
+ err = PTR_ERR(request[nc]);
+ goto out;
+ }
+
+ i915_request_add(request[nc]);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ kernel_context_close(ctx[nc]);
+ }
+ return err;
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class, inst;
+ int err = -ENODEV;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ goto out_unlock;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(i915, siblings, nsibling,
+ n, 0);
+ if (err)
+ goto out_unlock;
+ }
+
+ err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int mask_virtual_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct i915_gem_context *ctx;
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order, as it is unlikely to occur by accident */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ kernel_context_close(ctx);
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+ int err = 0;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ break;
+
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(i915, siblings, nsibling);
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int bond_virtual_engine(struct drm_i915_private *i915,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
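+/* BOND_SCHEDULE: hold the master behind a fence while the bonds are queued */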
+{
+ struct intel_engine_cs *master;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ unsigned long n;
+ int err;
+
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, i915, id) {
+ struct i915_sw_fence fence = {};
+
+ if (master->class == class)
+ continue;
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_request_alloc(ctx, master);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(ctx,
+ siblings,
+ nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
+ onstack_fence_fini(&fence);
+
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1], 0,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ kernel_context_close(ctx);
+ return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+ int err = 0;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ break;
+
+ GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(i915,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ goto out_unlock;
+ }
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1318,6 +1820,9 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_bond),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
new file mode 100644
index 000000000000..89da9e7cc1ba
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_atomic.h"
+
+static int igt_global_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ unsigned int reset_count;
+ int err = 0;
+
+ /* Check that we can issue a global GPU reset */
+
+ igt_global_reset_lock(i915);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+
+ i915_reset(i915, ALL_ENGINES, NULL);
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ }
+
+ igt_global_reset_unlock(i915);
+
+ if (i915_reset_failed(i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int igt_wedged_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+
+ /* Check that we can recover a wedged device with a GPU reset */
+
+ igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ i915_gem_set_wedged(i915);
+
+ GEM_BUG_ON(!i915_reset_failed(i915));
+ i915_reset(i915, ALL_ENGINES, NULL);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(i915);
+
+ return i915_reset_failed(i915) ? -EIO : 0;
+}
+
+static int igt_atomic_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const typeof(*igt_atomic_phases) *p;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ igt_global_reset_lock(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(i915))
+ goto unlock;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+
+ p->critical_section_begin();
+ reset_prepare(i915);
+ err = intel_gpu_reset(i915, ALL_ENGINES);
+ reset_finish(i915);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_gpu_reset failed under %s\n", p->name);
+ break;
+ }
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(i915);
+
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ igt_global_reset_unlock(i915);
+
+ return err;
+}
+
+int intel_reset_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_wedged_reset),
+ SUBTEST(igt_atomic_reset),
+ };
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ if (i915_terminally_wedged(i915))
+ return -EIO; /* we're long past hope of a successful reset */
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ err = i915_subtests(tests, i915);
+
+ return err;
+}
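The p->name loop in igt_atomic_reset() above walks igt_atomic_phases from selftests/igt_atomic.h, which is not part of this diff; a hedged sketch of the sentinel-terminated shape the loop assumes:

#include <linux/preempt.h>

struct igt_atomic_section {
	const char *name;
	void (*critical_section_begin)(void);
	void (*critical_section_end)(void);
};

static void __preempt_begin(void) { preempt_disable(); }
static void __preempt_end(void) { preempt_enable(); }

static const struct igt_atomic_section igt_atomic_phases[] = {
	{ "preempt", __preempt_begin, __preempt_end },
	{ } /* the NULL name terminates the p->name loop */
};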
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 567b6f8dae86..9eaf030affd0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -4,15 +4,18 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
-#include "../i915_reset.h"
+#include "gem/i915_gem_pm.h"
+#include "i915_selftest.h"
+#include "intel_reset.h"
-#include "igt_flush_test.h"
-#include "igt_reset.h"
-#include "igt_spinner.h"
-#include "igt_wedge_me.h"
-#include "mock_context.h"
-#include "mock_drm.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_wedge_me.h"
+#include "selftests/mock_drm.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
static const struct wo_register {
enum intel_platform platform;
@@ -21,12 +24,13 @@ static const struct wo_register {
{ INTEL_GEMINILAKE, 0x731c }
};
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
+ struct i915_wa_list ctx_wa_list;
} engine[I915_NUM_ENGINES];
};
@@ -51,6 +55,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
wa_init_start(wal, name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
+
+ snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
+
+ __intel_engine_init_ctx_wa(engine,
+ &lists->engine[id].ctx_wa_list,
+ name);
}
}
@@ -71,7 +81,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
- intel_wakeref_t wakeref;
struct i915_request *rq;
struct i915_vma *vma;
u32 srm, *cs;
@@ -103,15 +112,15 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref)
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
}
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto err_req;
@@ -133,9 +142,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
intel_ring_advance(rq, cs);
- i915_gem_object_get(result);
- i915_gem_object_set_active_reference(result);
-
i915_request_add(rq);
i915_vma_unpin(vma);
@@ -188,8 +194,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
return PTR_ERR(results);
err = 0;
+ i915_gem_object_lock(results);
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
+ i915_gem_object_unlock(results);
if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
@@ -248,7 +256,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref)
+ with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -304,7 +312,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
@@ -340,49 +348,6 @@ out:
return err;
}
-static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- goto err_obj;
- }
- memset(ptr, 0xc5, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
@@ -393,7 +358,7 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -403,10 +368,6 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (err)
goto err_obj;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err_obj;
-
return vma;
err_obj:
@@ -441,6 +402,29 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
return false;
}
+static bool ro_register(u32 reg)
+{
+ if (reg & RING_FORCE_TO_NONPRIV_RD)
+ return true;
+
+ return false;
+}
+
+static int whitelist_writable_count(struct intel_engine_cs *engine)
+{
+ int count = engine->whitelist.count;
+ int i;
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ count--;
+ }
+
+ return count;
+}
+
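+/*
+ * Sketch of the register encoding ro_register() relies on (an assumption
+ * from the RING_FORCE_TO_NONPRIV usage above): each whitelist slot packs
+ * access-control bits into the mmio offset, e.g.
+ *
+ *	u32 reg = i915_mmio_reg_offset(w->reg);
+ *	bool read_only = reg & RING_FORCE_TO_NONPRIV_RD;
+ *	u32 offset = reg & ~(RING_FORCE_TO_NONPRIV_RD |
+ *			     RING_FORCE_TO_NONPRIV_WR);
+ *
+ * which is why read-only entries are excluded from the writable count.
+ */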
static int check_dirty_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -475,7 +459,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx);
+ scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -496,6 +480,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
if (wo_register(engine, reg))
continue;
+ if (ro_register(reg))
+ continue;
+
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
@@ -557,7 +544,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_unpin_map(batch->obj);
i915_gem_chipset_flush(ctx->i915);
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -580,7 +567,7 @@ err_request:
if (err)
goto out_batch;
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
i915_gem_set_wedged(ctx->i915);
@@ -675,7 +662,7 @@ static int live_dirty_whitelist(void *arg)
if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_unlock(&i915->drm.struct_mutex);
file = mock_file(i915);
@@ -705,7 +692,7 @@ out_file:
mock_file_free(i915, file);
mutex_lock(&i915->drm.struct_mutex);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
@@ -743,26 +730,352 @@ out:
return err;
}
-static bool verify_gt_engine_wa(struct drm_i915_private *i915,
- struct wa_lists *lists, const char *str)
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ struct i915_request *rq;
+ int i, err = 0;
+ u32 srm, *cs;
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
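+	/*
+	 * On gen8+ the SRM command takes a 64-bit address, encoded as one
+	 * extra dword of length: hence the srm++ below.
+	 */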
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ /* Clear RD only and WR only flags */
+ reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);
+
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err = 0;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ continue;
+
+ *cs++ = reg;
+ *cs++ = 0xffffffff;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ /* Perform the writes from an unprivileged "user" batch */
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
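+/*
+ * For reference, the MI_LOAD_REGISTER_IMM payload built above is one header
+ * dword followed by (offset, value) pairs, e.g. for two writable registers:
+ *
+ *	*cs++ = MI_LOAD_REGISTER_IMM(2);
+ *	*cs++ = i915_mmio_reg_offset(REG_A);	*cs++ = 0xffffffff;
+ *	*cs++ = i915_mmio_reg_offset(REG_B);	*cs++ = 0xffffffff;
+ *	*cs++ = MI_BATCH_BUFFER_END;
+ *
+ * (REG_A/REG_B are placeholders, not real register names.)
+ */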
+struct regmask {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+};
+
+static bool find_reg(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct regmask *tbl,
+ unsigned long count)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ while (count--) {
+ if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
+ i915_mmio_reg_offset(tbl->reg) == offset)
+ return true;
+ tbl++;
+ }
+
+ return false;
+}
+
+static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists. Mistakes already made */
+ static const struct regmask pardon[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ };
+
+ return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
+}
+
+static bool result_eq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a != b && !pardon_reg(engine->i915, reg)) {
+ pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i915_mmio_reg_offset(reg), a, b);
+ return false;
+ }
+
+ return true;
+}
+
+static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	/* Some registers do not seem to behave, leaving our writes unreadable */
+ static const struct regmask wo[] = {
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
+ };
+
+ return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
+}
+
+static bool result_neq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a == b && !writeonly_reg(engine->i915, reg)) {
+ pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
+ i915_mmio_reg_offset(reg), a);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+check_whitelisted_registers(struct intel_engine_cs *engine,
+ struct i915_vma *A,
+ struct i915_vma *B,
+ bool (*fn)(struct intel_engine_cs *engine,
+ u32 a, u32 b,
+ i915_reg_t reg))
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+ err = -EINVAL;
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
{
+ struct drm_i915_private *i915 = arg;
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+	 * Check that a write into a whitelist register works, but is
+	 * invisible to a second context.
+ */
+
+ if (!intel_engines_has_context_isolation(i915))
+ return 0;
+
+ if (!i915->kernel_context->vm)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto err;
+ }
+
+ client[i].scratch[0] = create_scratch(c->vm, 1024);
+ if (IS_ERR(client[i].scratch[0])) {
+ err = PTR_ERR(client[i].scratch[0]);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] = create_scratch(c->vm, 1024);
+ if (IS_ERR(client[i].scratch[1])) {
+ err = PTR_ERR(client[i].scratch[1]);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!whitelist_writable_count(engine))
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[1].scratch[0],
+ result_eq);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+		/* User should be granted privilege to overwrite regs */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[0].scratch[1],
+ result_neq);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
+static bool
+verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+ const char *str)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
bool ok = true;
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_engine(engine, i915, id)
- ok &= wa_list_verify(engine->uncore,
- &lists->engine[id].wa_list, str);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ enum intel_engine_id id = ce->engine->id;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].wa_list,
+ str) == 0;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].ctx_wa_list,
+ str) == 0;
+ }
+ i915_gem_context_unlock_engines(ctx);
return ok;
}
static int
-live_gpu_reset_gt_engine_workarounds(void *arg)
+live_gpu_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -770,31 +1083,36 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
if (!intel_has_gpu_reset(i915))
return 0;
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
- ok = verify_gt_engine_wa(i915, &lists, "before reset");
+ ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
i915_reset(i915, ALL_ENGINES, "live_workarounds");
- ok = verify_gt_engine_wa(i915, &lists, "after reset");
+ ok = verify_wa_lists(ctx, &lists, "after reset");
out:
+ kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
}
static int
-live_engine_reset_gt_engine_workarounds(void *arg)
+live_engine_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
@@ -814,7 +1132,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -823,7 +1141,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
pr_info("Verifying after %s reset...\n", engine->name);
- ok = verify_gt_engine_wa(i915, &lists, "before reset");
+ ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -831,7 +1149,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
i915_reset_engine(engine, "live_workarounds");
- ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
+ ok = verify_wa_lists(ctx, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -862,7 +1180,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
+ ok = verify_wa_lists(ctx, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -871,7 +1189,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
err:
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
@@ -885,8 +1203,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
- SUBTEST(live_gpu_reset_gt_engine_workarounds),
- SUBTEST(live_engine_reset_gt_engine_workarounds),
+ SUBTEST(live_isolated_whitelist),
+ SUBTEST(live_gpu_reset_workarounds),
+ SUBTEST(live_engine_reset_workarounds),
};
int err;
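The wakeref conversions repeated across this patch (passing &i915->runtime_pm instead of the whole device) all feed the same scoped-acquire pattern; a minimal sketch of how with_intel_runtime_pm() can be built from the get/put pair (the real macro lives in intel_runtime_pm.h, so treat the exact form as an assumption):

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

/* Usage: the body runs once, with the wakeref held throughout. */
intel_wakeref_t wakeref;
int err;

with_intel_runtime_pm(&i915->runtime_pm, wakeref)
	err = i915_subtests(tests, i915);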
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 1fa2f65c3cd1..c3d19d88da40 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -35,6 +35,7 @@
*/
#include "i915_drv.h"
+#include "i915_gem_fence_reg.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
@@ -128,10 +129,10 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
@@ -163,13 +164,13 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
@@ -180,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct i915_fence_reg *reg;
int i;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(rpm);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -206,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +221,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
@@ -315,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index de5347725564..6ea88270c818 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1725,7 +1725,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
- unsigned long gma_start_offset = 0;
+ unsigned long start_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1742,7 +1742,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
- /* the gma_start_offset stores the batch buffer's start gma's
+ /* the start_offset stores the batch buffer's start gma's
* offset relative to page boundary. so for non-privileged batch
* buffer, the shadowed gem object holds exactly the same page
* layout as the original gem object. This is for the convenience of
@@ -1754,16 +1754,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* that of shadowed page.
*/
if (bb->ppgtt)
- gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
+ start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size + gma_start_offset, PAGE_SIZE));
+ bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ round_up(bb_size + start_offset,
+ PAGE_SIZE));
if (IS_ERR(bb->obj)) {
ret = PTR_ERR(bb->obj);
goto err_free_bb;
}
- ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+ ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
if (ret)
goto err_free_obj;
@@ -1780,7 +1781,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, mm,
gma, gma + bb_size,
- bb->va + gma_start_offset);
+ bb->va + start_offset);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
ret = -EFAULT;
@@ -1806,13 +1807,13 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = bb->va + gma_start_offset;
+ s->ip_va = bb->va + start_offset;
s->ip_gma = gma;
return 0;
err_unmap:
i915_gem_object_unpin_map(bb->obj);
err_finish_shmem_access:
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
@@ -2829,9 +2830,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
- roundup(ctx_size + CACHELINE_BYTES,
- PAGE_SIZE));
+ obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -2843,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto put_obj;
}
+ i915_gem_object_lock(obj);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (ret) {
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
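A worked example of the shadow sizing in perform_bb_shadow() above, assuming I915_GTT_PAGE_MASK masks off the low 12 bits (4KiB GTT pages) and using made-up numbers:

u64 gma = 0x12345a40;			/* hypothetical guest batch start */
unsigned long start_offset = gma & ~I915_GTT_PAGE_MASK;	/* 0xa40 */
unsigned long bb_size = 0x2000;
/* shadow object: round_up(0x2000 + 0xa40, 0x1000) == 0x3000 bytes */
unsigned long obj_size = round_up(bb_size + start_offset, PAGE_SIZE);
/* the guest contents land at bb->va + 0xa40, preserving in-page layout */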
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 8a9606f91e68..2fb7b73b260d 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,12 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = I915_READ_NOTRACE(_MMIO(offset));
+ preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4ac18b447247..049775e8e350 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,10 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->dev_priv;
- *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ _MMIO(offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index f5a328b5290a..7a1fe44d45af 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -87,7 +87,7 @@ struct intel_vgpu_gm {
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
- struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
@@ -149,9 +149,9 @@ struct intel_vgpu_submission_ops {
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct intel_context *shadow[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
- struct i915_gem_context *shadow_ctx;
union {
u64 i915_context_pml4;
u64 i915_context_pdps[GEN8_3LVL_PDPES];
@@ -390,7 +390,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
@@ -584,12 +584,12 @@ enum {
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
}
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a68addf95c23..144301b778df 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1576,7 +1576,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
- vgpu->submission.shadow_ctx->hw_id);
+ vgpu->submission.shadow[0]->gem_context->hw_id);
}
return sprintf(buf, "\n");
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 90bb3df0db50..2998999e8568 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -34,6 +34,7 @@
*/
#include "i915_drv.h"
+#include "gt/intel_context.h"
#include "gvt.h"
#include "trace.h"
@@ -493,8 +494,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
- dev_priv->engine[ring_id])))
+ !is_inhibit_context(s->shadow[ring_id]))
continue;
if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 276db53f1bf1..867e7629025b 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -30,7 +30,7 @@
* not do like this.
*/
#define _INTEL_BIOS_PRIVATE
-#include "intel_vbt_defs.h"
+#include "display/intel_vbt_defs.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT (1<<3)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 1c763a27a412..2369d4a9af94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0f919f0a43d4..2144fb46d0e1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,6 +35,10 @@
#include <linux/kthread.h>
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#include "i915_drv.h"
#include "gvt.h"
@@ -277,18 +281,23 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct intel_context *ce)
+static void
+shadow_context_descriptor_update(struct intel_context *ce,
+ struct intel_vgpu_workload *workload)
{
- u64 desc = 0;
+ u64 desc = ce->lrc_desc;
- desc = ce->lrc_desc;
-
- /* Update bits 0-11 of the context descriptor which includes flags
+ /*
+ * Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
+ desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
ce->lrc_desc = desc;
}
@@ -359,18 +368,20 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+ px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
- px_dma(ppgtt->pdp.page_directory[i]) =
- mm->ppgtt_mm.shadow_pdps[i];
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
@@ -382,26 +393,22 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct i915_request *rq;
- int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (workload->req)
- goto out;
+ return 0;
- rq = i915_request_alloc(engine, shadow_ctx);
+ rq = i915_request_create(s->shadow[workload->ring_id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto out;
+ return PTR_ERR(rq);
}
+
workload->req = i915_request_get(rq);
-out:
- return ret;
+ return 0;
}
/**
@@ -416,10 +423,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
- struct intel_context *ce;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -427,29 +431,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ce = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ce)) {
- gvt_vgpu_err("fail to pin shadow context\n");
- return PTR_ERR(ce);
- }
-
- shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(ce);
+ shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_unpin;
+ return ret;
if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
@@ -461,8 +449,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
return 0;
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_unpin:
- intel_context_unpin(ce);
return ret;
}
@@ -501,7 +487,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb->obj->base.size);
bb->clflush &= ~CLFLUSH_AFTER;
}
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
bb->accessing = false;
} else {
@@ -525,18 +511,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
ret = i915_gem_object_set_to_gtt_domain(bb->obj,
- false);
+ false);
if (ret)
goto err;
- i915_gem_obj_finish_shmem_access(bb->obj);
- bb->accessing = false;
-
ret = i915_vma_move_to_active(bb->vma,
workload->req,
0);
if (ret)
goto err;
+
+ i915_gem_object_finish_access(bb->obj);
+ bb->accessing = false;
}
}
return 0;
@@ -607,7 +593,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
@@ -616,7 +602,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_vma_unpin(bb->vma);
i915_vma_close(bb->vma);
}
- __i915_gem_object_release_unless_active(bb->obj);
+ i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
kfree(bb);
@@ -689,7 +675,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -700,7 +685,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ ret = set_context_ppgtt_from_shadow(workload,
+ s->shadow[ring_id]->gem_context);
if (ret < 0) {
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
goto err_req;
@@ -949,11 +935,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- /* unpin shadow ctx as the shadow_ctx update is done */
- mutex_lock(&rq->i915->drm.struct_mutex);
- intel_context_unpin(rq->hw_context);
- mutex_unlock(&rq->i915->drm.struct_mutex);
-
i915_request_put(fetch_and_zero(&workload->req));
}
@@ -1032,8 +1013,6 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
- intel_runtime_pm_get(gvt->dev_priv);
-
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1063,7 +1042,6 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1146,17 +1124,20 @@ err:
}
static void
-i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
+ struct i915_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
- px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ px_dma(ppgtt->pd) = s->i915_context_pml4;
} else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++)
- px_dma(i915_ppgtt->pdp.page_directory[i]) =
- s->i915_context_pdps[i];
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ px_dma(pd) = s->i915_context_pdps[i];
+ }
}
}
@@ -1170,10 +1151,15 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s);
- i915_gem_context_put(s->shadow_ctx);
+
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+ for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ intel_context_unpin(s->shadow[id]);
+
kmem_cache_destroy(s->workloads);
}
@@ -1199,17 +1185,20 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
}
static void
-i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
+ struct i915_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_4lvl(&i915_ppgtt->vm))
- s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
- else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++)
- s->i915_context_pdps[i] =
- px_dma(i915_ppgtt->pdp.page_directory[i]);
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ s->i915_context_pml4 = px_dma(ppgtt->pd);
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ s->i915_context_pdps[i] = px_dma(pd);
+ }
}
}
@@ -1226,16 +1215,36 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- enum intel_engine_id i;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id i;
int ret;
- s->shadow_ctx = i915_gem_context_create_gvt(
- &vgpu->gvt->dev_priv->drm);
- if (IS_ERR(s->shadow_ctx))
- return PTR_ERR(s->shadow_ctx);
+ ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ struct intel_context *ce;
- i915_context_ppgtt_root_save(s);
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
+ s->shadow[i] = ERR_PTR(-EINVAL);
+
+ ce = i915_gem_context_get_engine(ctx, i);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_shadow_ctx;
+ }
+
+ ret = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (ret)
+ goto out_shadow_ctx;
+
+ s->shadow[i] = ce;
+ }
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -1251,16 +1260,21 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
goto out_shadow_ctx;
}
- for_each_engine(engine, vgpu->gvt->dev_priv, i)
- INIT_LIST_HEAD(&s->workload_q_head[i]);
-
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ i915_gem_context_put(ctx);
return 0;
out_shadow_ctx:
- i915_gem_context_put(s->shadow_ctx);
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (IS_ERR(s->shadow[i]))
+ break;
+
+ intel_context_unpin(s->shadow[i]);
+ }
+ i915_gem_context_put(ctx);
return ret;
}
@@ -1520,11 +1534,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 863ae12707ba..293e5bcc4b6c 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_engine_pm.h"
+
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
@@ -157,6 +159,7 @@ void i915_active_init(struct drm_i915_private *i915,
ref->retire = retire;
ref->tree = RB_ROOT;
i915_active_request_init(&ref->last, NULL, last_retire);
+ init_llist_head(&ref->barriers);
ref->count = 0;
}
@@ -263,6 +266,99 @@ void i915_active_fini(struct i915_active *ref)
}
#endif
+int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
+ struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct llist_node *pos, *next;
+ unsigned long tmp;
+ int err;
+
+ GEM_BUG_ON(!engine->mask);
+ for_each_engine_masked(engine, i915, engine->mask, tmp) {
+ struct intel_context *kctx = engine->kernel_context;
+ struct active_node *node;
+
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (unlikely(!node)) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ i915_active_request_init(&node->base,
+ (void *)engine, node_retire);
+ node->timeline = kctx->ring->timeline->fence_context;
+ node->ref = ref;
+ ref->count++;
+
+ intel_engine_pm_get(engine);
+ llist_add((struct llist_node *)&node->base.link,
+ &ref->barriers);
+ }
+
+ return 0;
+
+unwind:
+ llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+ struct active_node *node;
+
+ node = container_of((struct list_head *)pos,
+ typeof(*node), base.link);
+ engine = (void *)rcu_access_pointer(node->base.request);
+
+ intel_engine_pm_put(engine);
+ kmem_cache_free(global.slab_cache, node);
+ }
+ return err;
+}
+
+void i915_active_acquire_barrier(struct i915_active *ref)
+{
+ struct llist_node *pos, *next;
+
+ i915_active_acquire(ref);
+
+ llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+ struct intel_engine_cs *engine;
+ struct active_node *node;
+ struct rb_node **p, *parent;
+
+ node = container_of((struct list_head *)pos,
+ typeof(*node), base.link);
+
+ engine = (void *)rcu_access_pointer(node->base.request);
+ RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+
+ parent = NULL;
+ p = &ref->tree.rb_node;
+ while (*p) {
+ parent = *p;
+ if (rb_entry(parent,
+ struct active_node,
+ node)->timeline < node->timeline)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&node->node, parent, p);
+ rb_insert_color(&node->node, &ref->tree);
+
+ llist_add((struct llist_node *)&node->base.link,
+ &engine->barrier_tasks);
+ intel_engine_pm_put(engine);
+ }
+ i915_active_release(ref);
+}
+
+void i915_request_add_barriers(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
+ list_add_tail((struct list_head *)node, &rq->active_list);
+}
+
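+/*
+ * The casts above lean on struct llist_node (one pointer) fitting inside
+ * struct list_head (two pointers), so base.link can be parked on the
+ * lock-free barrier list and later rewritten as a normal list entry:
+ *
+ *	BUILD_BUG_ON(sizeof(struct llist_node) > sizeof(struct list_head));
+ *
+ * (a sketch of the assumed invariant, not a check present in this patch)
+ */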
int i915_active_request_set(struct i915_active_request *active,
struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 7d758719ce39..c14eebf6d074 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -330,7 +330,7 @@ i915_active_request_retire(struct i915_active_request *active,
return 0;
ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -406,4 +406,9 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
+int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
+ struct intel_engine_cs *engine);
+void i915_active_acquire_barrier(struct i915_active *ref);
+void i915_request_add_barriers(struct i915_request *rq);
+
#endif /* _I915_ACTIVE_H_ */
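A hedged usage sketch of the barrier API declared above (the caller, keep_busy_until_idle(), is hypothetical; the pattern is preallocate while sleeping is allowed, then publish):

static int keep_busy_until_idle(struct i915_active *ref,
				struct intel_engine_cs *engine)
{
	int err;

	/* Allocate one barrier node per physical engine; may sleep. */
	err = i915_active_acquire_preallocate_barrier(ref, engine);
	if (err)
		return err;

	/* Publish the nodes into the active tree and engine barrier lists. */
	i915_active_acquire_barrier(ref);
	return 0;
}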
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index b679253b53a5..c025991b9233 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,6 +7,7 @@
#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_
+#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
@@ -31,6 +32,8 @@ struct i915_active {
unsigned int count;
void (*retire)(struct i915_active *ref);
+
+ struct llist_head barriers;
};
#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 503d548a55f7..a28bcd2d7c09 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -25,8 +25,9 @@
*
*/
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
-#include "intel_ringbuffer.h"
/**
* DOC: batch buffer command parser
@@ -1057,19 +1058,20 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+ ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret)
return ERR_PTR(ret);
- ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
- if (ret) {
- dst = ERR_PTR(ret);
- goto unpin_src;
- }
-
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
- goto unpin_dst;
+ return dst;
+
+ ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ if (ret) {
+ i915_gem_object_unpin_map(dst_obj);
+ return ERR_PTR(ret);
+ }
src = ERR_PTR(-ENODEV);
if (src_needs_clflush &&
@@ -1115,13 +1117,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
+ i915_gem_object_finish_access(src_obj);
+
/* dst_obj is returned with vmap pinned */
*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-unpin_dst:
- i915_gem_obj_finish_shmem_access(dst_obj);
-unpin_src:
- i915_gem_obj_finish_shmem_access(src_obj);
return dst;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5823ffb17821..62cf34db9280 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,15 +32,22 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
-#include "i915_reset.h"
-#include "intel_dp.h"
+#include "display/intel_dp.h"
+#include "display/intel_fbc.h"
+#include "display/intel_hdcp.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_psr.h"
+
+#include "gem/i915_gem_context.h"
+#include "gt/intel_reset.h"
+
+#include "i915_debugfs.h"
+#include "i915_irq.h"
+#include "intel_csr.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
#include "intel_guc_submission.h"
-#include "intel_hdcp.h"
-#include "intel_hdmi.h"
#include "intel_pm.h"
-#include "intel_psr.h"
+#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -98,19 +105,6 @@ static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
return obj->mm.mapping ? 'M' : ' ';
}
-static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
-{
- u64 size = 0;
- struct i915_vma *vma;
-
- for_each_ggtt_vma(vma, obj) {
- if (drm_mm_node_allocated(&vma->node))
- size += vma->node.size;
- }
-
- return size;
-}
-
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
@@ -150,8 +144,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
unsigned int frontbuffer_bits;
int pin_count = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
@@ -167,17 +159,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (i915_vma_is_pinned(vma))
- pin_count++;
- }
- seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+
+ spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
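+		/*
+		 * The vma.lock cannot be held across the printing below, so
+		 * drop it here and retake it at the bottom of the loop
+		 * before advancing the iterator.
+		 */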
+ spin_unlock(&obj->vma.lock);
+
+ if (i915_vma_is_pinned(vma))
+ pin_count++;
+
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size,
@@ -206,6 +198,18 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
vma->ggtt_view.rotated.plane[1].offset);
break;
+ case I915_GGTT_VIEW_REMAPPED:
+ seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ vma->ggtt_view.remapped.plane[0].width,
+ vma->ggtt_view.remapped.plane[0].height,
+ vma->ggtt_view.remapped.plane[0].stride,
+ vma->ggtt_view.remapped.plane[0].offset,
+ vma->ggtt_view.remapped.plane[1].width,
+ vma->ggtt_view.remapped.plane[1].height,
+ vma->ggtt_view.remapped.plane[1].stride,
+ vma->ggtt_view.remapped.plane[1].offset);
+ break;
+
default:
MISSING_CASE(vma->ggtt_view.type);
break;
@@ -216,9 +220,16 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
vma->fence->id,
i915_active_request_isset(&vma->last_fence) ? "*" : "");
seq_puts(m, ")");
+
+ spin_lock(&obj->vma.lock);
}
+ spin_unlock(&obj->vma.lock);
+
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+ if (obj->pin_global)
+ seq_printf(m, " (global)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
@@ -229,83 +240,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
-static int obj_rank_by_stolen(const void *A, const void *B)
-{
- const struct drm_i915_gem_object *a =
- *(const struct drm_i915_gem_object **)A;
- const struct drm_i915_gem_object *b =
- *(const struct drm_i915_gem_object **)B;
-
- if (a->stolen->start < b->stolen->start)
- return -1;
- if (a->stolen->start > b->stolen->start)
- return 1;
- return 0;
-}
-
-static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object **objects;
- struct drm_i915_gem_object *obj;
- u64 total_obj_size, total_gtt_size;
- unsigned long total, count, n;
- int ret;
-
- total = READ_ONCE(dev_priv->mm.object_count);
- objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
- if (!objects)
- return -ENOMEM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
-
- total_obj_size = total_gtt_size = count = 0;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- if (count == total)
- break;
-
- if (obj->stolen == NULL)
- continue;
-
- objects[count++] = obj;
- total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-
- }
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
- if (count == total)
- break;
-
- if (obj->stolen == NULL)
- continue;
-
- objects[count++] = obj;
- total_obj_size += obj->base.size;
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
-
- seq_puts(m, "Stolen:\n");
- for (n = 0; n < count; n++) {
- seq_puts(m, " ");
- describe_obj(m, objects[n]);
- seq_putc(m, '\n');
- }
- seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
- count, total_obj_size, total_gtt_size);
-
- mutex_unlock(&dev->struct_mutex);
-out:
- kvfree(objects);
- return ret;
-}
-
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
@@ -325,7 +259,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (!obj->bind_count)
+ if (!atomic_read(&obj->bind_count))
stats->unbound += obj->base.size;
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
@@ -395,17 +329,20 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
- list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
per_file_stats(0, ce->ring->vma->obj, &kstats);
}
+ i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+ struct file_stats stats = { .vm = ctx->vm, };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
@@ -429,153 +366,22 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
- u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
- struct drm_i915_gem_object *obj;
- unsigned int page_sizes = 0;
- char buf[80];
+ struct drm_i915_private *i915 = node_to_i915(m->private);
int ret;
- seq_printf(m, "%u objects, %llu bytes\n",
- dev_priv->mm.object_count,
- dev_priv->mm.object_memory);
-
- size = count = 0;
- mapped_size = mapped_count = 0;
- purgeable_size = purgeable_count = 0;
- huge_size = huge_count = 0;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
- size += obj->base.size;
- ++count;
-
- if (obj->mm.madv == I915_MADV_DONTNEED) {
- purgeable_size += obj->base.size;
- ++purgeable_count;
- }
-
- if (obj->mm.mapping) {
- mapped_count++;
- mapped_size += obj->base.size;
- }
-
- if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- huge_count++;
- huge_size += obj->base.size;
- page_sizes |= obj->mm.page_sizes.sg;
- }
- }
- seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
-
- size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- size += obj->base.size;
- ++count;
-
- if (obj->pin_global) {
- dpy_size += obj->base.size;
- ++dpy_count;
- }
-
- if (obj->mm.madv == I915_MADV_DONTNEED) {
- purgeable_size += obj->base.size;
- ++purgeable_count;
- }
-
- if (obj->mm.mapping) {
- mapped_count++;
- mapped_size += obj->base.size;
- }
-
- if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- huge_count++;
- huge_size += obj->base.size;
- page_sizes |= obj->mm.page_sizes.sg;
- }
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- seq_printf(m, "%u bound objects, %llu bytes\n",
- count, size);
- seq_printf(m, "%u purgeable objects, %llu bytes\n",
- purgeable_count, purgeable_size);
- seq_printf(m, "%u mapped objects, %llu bytes\n",
- mapped_count, mapped_size);
- seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
- huge_count,
- stringify_page_sizes(page_sizes, buf, sizeof(buf)),
- huge_size);
- seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
- dpy_count, dpy_size);
-
- seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->vm.total, &ggtt->mappable_end);
- seq_printf(m, "Supported page sizes: %s\n",
- stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
- buf, sizeof(buf)));
+ seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+ i915->mm.shrink_count,
+ i915->mm.shrink_memory);
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- print_batch_pool_stats(m, dev_priv);
- print_context_stats(m, dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_gem_gtt_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object **objects;
- struct drm_i915_gem_object *obj;
- u64 total_obj_size, total_gtt_size;
- unsigned long nobject, n;
- int count, ret;
-
- nobject = READ_ONCE(dev_priv->mm.object_count);
- objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
- if (!objects)
- return -ENOMEM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- count = 0;
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- objects[count++] = obj;
- if (count == nobject)
- break;
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- total_obj_size = total_gtt_size = 0;
- for (n = 0; n < count; n++) {
- obj = objects[n];
-
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
- count, total_obj_size, total_gtt_size);
- kvfree(objects);
+ print_batch_pool_stats(m, i915);
+ print_context_stats(m, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -685,7 +491,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
int i, pipe;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t pref;
@@ -891,35 +697,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
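Throughout this patch the runtime-PM entry points stop taking the whole drm_i915_private and instead take the embedded struct intel_runtime_pm. A sketch of the new calling convention, using only the functions converted above:

static void hold_wakeref_example(struct drm_i915_private *i915)
{
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(rpm);    /* returns a cookie */
        /* ... access hardware here ... */
        intel_runtime_pm_put(rpm, wakeref);     /* pairs with the cookie */
}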
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i, ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ unsigned int i;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+ rcu_read_lock();
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, dev_priv->fence_regs[i].pin_count);
+ i, i915->ggtt.fence_regs[i].pin_count);
if (!vma)
seq_puts(m, "unused");
else
describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
+ rcu_read_unlock();
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -967,7 +770,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
intel_wakeref_t wakeref;
gpu = NULL;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
gpu = i915_capture_gpu_state(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
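For short critical sections the scoped macro used above avoids manual get/put pairing. A sketch under the same assumption that the macro now takes the intel_runtime_pm pointer:

static u32 read_one_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        intel_wakeref_t wakeref;
        u32 val = 0;

        /* Acquires a wakeref for the body, releases it on scope exit. */
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                val = intel_uncore_read(&i915->uncore, reg);

        return val;
}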
@@ -1026,15 +829,16 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_wakeref_t wakeref;
int ret = 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_GEN(dev_priv, 5)) {
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
@@ -1045,8 +849,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- mutex_lock(&dev_priv->pcu_lock);
-
rpmodectl = I915_READ(GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -1056,7 +858,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
+ vlv_punit_get(dev_priv);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
@@ -1078,7 +883,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, rps->efficient_freq));
- mutex_unlock(&dev_priv->pcu_lock);
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -1242,7 +1046,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
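With pcu_lock gone, Punit sideband accesses take their own short-lived lock via vlv_punit_get()/vlv_punit_put(), as in the VLV/CHV branch above. A sketch limited to the calls shown in this patch:

static u32 vlv_read_gpu_freq_sts(struct drm_i915_private *i915)
{
        u32 freq_sts;

        vlv_punit_get(i915);            /* wakes the Punit sideband */
        freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
        vlv_punit_put(i915);

        return freq_sts;
}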
@@ -1279,7 +1083,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
- u32 seqno[I915_NUM_ENGINES];
struct intel_instdone instdone;
intel_wakeref_t wakeref;
enum intel_engine_id id;
@@ -1295,11 +1098,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- with_intel_runtime_pm(dev_priv, wakeref) {
- for_each_engine(engine, dev_priv, id) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ for_each_engine(engine, dev_priv, id)
acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_hangcheck_seqno(engine);
- }
intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
@@ -1316,11 +1117,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s:\n", engine->name);
- seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
- engine->hangcheck.last_seqno,
- seqno[id],
- engine->hangcheck.next_seqno,
+ seq_printf(m, "%s: %d ms ago\n",
+ engine->name,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@@ -1362,13 +1160,14 @@ static int i915_reset_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_uncore *uncore = &i915->uncore;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- rgvmodectl = I915_READ(MEMMODECTL);
- rstdbyctl = I915_READ(RSTDBYCTL);
- crstandvid = I915_READ16(CRSTANDVID);
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1483,12 +1282,9 @@ static int gen6_drpc_info(struct seq_file *m)
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- if (INTEL_GEN(dev_priv) <= 7) {
- mutex_lock(&dev_priv->pcu_lock);
+ if (INTEL_GEN(dev_priv) <= 7)
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids);
- mutex_unlock(&dev_priv->pcu_lock);
- }
+ &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1562,7 +1358,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
int err = -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
@@ -1596,7 +1392,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
if (!HAS_FBC(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
@@ -1623,7 +1419,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -1673,7 +1469,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
if (!HAS_IPS(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
@@ -1687,7 +1483,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -1729,7 +1525,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN(i915, 5))
return -ENODEV;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
unsigned long temp, chipset, gfx;
temp = i915_mch_val(i915);
@@ -1752,17 +1548,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
- int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
-
- ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
- if (ret)
- goto out;
-
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
@@ -1773,11 +1562,12 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq);
+ &ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
(IS_GEN9_BC(dev_priv) ||
@@ -1786,12 +1576,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->pcu_lock);
-
-out:
- intel_runtime_pm_put(dev_priv, wakeref);
- return ret;
+ return 0;
}
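sandybridge_pcode_read() now returns two dwords; callers interested only in the first pass NULL for the second out-parameter, as both hunks above do. A sketch of the updated call (val1 shown only to illustrate the optional second result):

        u32 val0, val1;

        /* Read both result registers ... */
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &val0, &val1);

        /* ... or ignore the second one. */
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &val0, NULL);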
static int i915_opregion(struct seq_file *m, void *unused)
@@ -1892,6 +1679,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
seq_puts(m, "HW context ");
@@ -1916,7 +1704,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
@@ -1924,6 +1713,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
}
+ i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
}
@@ -1960,9 +1750,10 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uncore *uncore = &dev_priv->uncore;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1971,36 +1762,36 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
- I915_READ(DCC));
+ intel_uncore_read(uncore, DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
- I915_READ(DCC2));
+ intel_uncore_read(uncore, DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
- I915_READ16(C0DRB3));
+ intel_uncore_read16(uncore, C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
- I915_READ16(C1DRB3));
+ intel_uncore_read16(uncore, C1DRB3));
} else if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
- I915_READ(MAD_DIMM_C0));
+ intel_uncore_read(uncore, MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
- I915_READ(MAD_DIMM_C1));
+ intel_uncore_read(uncore, MAD_DIMM_C1));
seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
- I915_READ(MAD_DIMM_C2));
+ intel_uncore_read(uncore, MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
- I915_READ(TILECTL));
+ intel_uncore_read(uncore, TILECTL));
if (INTEL_GEN(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
- I915_READ(GAMTARBMODE));
+ intel_uncore_read(uncore, GAMTARBMODE));
else
seq_printf(m, "ARB_MODE = 0x%08x\n",
- I915_READ(ARB_MODE));
+ intel_uncore_read(uncore, ARB_MODE));
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
- I915_READ(DISP_ARB_CTL));
+ intel_uncore_read(uncore, DISP_ARB_CTL));
}
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
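The implicit I915_READ()/I915_READ16() macros are being replaced by explicit intel_uncore accessors that name the uncore instance. A sketch of the converted idiom, assuming only the accessors used above:

        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 dcc;
        u16 drb;

        dcc = intel_uncore_read(uncore, DCC);           /* 32-bit mmio read */
        drb = intel_uncore_read16(uncore, C0DRB3);      /* 16-bit mmio read */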
@@ -2026,13 +1817,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
act_freq = vlv_punit_read(dev_priv,
PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
act_freq = (act_freq >> 8) & 0xff;
- mutex_unlock(&dev_priv->pcu_lock);
} else {
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
@@ -2040,8 +1831,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
- seq_printf(m, "GPU busy? %s [%d requests]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
+ seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -2060,9 +1850,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(dev_priv) >= 6 &&
- rps->enabled &&
- dev_priv->gt.active_requests) {
+ if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2112,7 +1900,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
@@ -2130,7 +1918,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
u32 i;
@@ -2516,7 +2304,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
if (!psr->sink_support)
return 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
@@ -2580,7 +2368,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -2597,11 +2385,11 @@ i915_edp_psr_debug_set(void *data, u64 val)
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ret = intel_psr_debug_set(dev_priv, val);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
@@ -2636,7 +2424,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
return -ENODEV;
units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
@@ -2672,7 +2460,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
struct drm_printer p = drm_seq_file_printer(m);
- print_intel_runtime_pm_wakeref(dev_priv, &p);
+ print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
}
return 0;
@@ -2717,7 +2505,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
csr = &dev_priv->csr;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2743,7 +2531,7 @@ out:
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3027,7 +2815,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct drm_connector_list_iter conn_iter;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
@@ -3076,7 +2864,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3089,11 +2877,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
enum intel_engine_id id;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
- seq_printf(m, "Global active requests: %d\n",
- dev_priv->gt.active_requests);
+ seq_printf(m, "GT awake? %s [%d]\n",
+ yesno(dev_priv->gt.awake),
+ atomic_read(&dev_priv->gt.wakeref.count));
seq_printf(m, "CS timestamp frequency: %u kHz\n",
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
@@ -3101,7 +2889,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3222,7 +3010,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (ret < 0)
return ret;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
@@ -3904,14 +3692,26 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- if (val & DROP_ACTIVE)
+ /*
+ * To finish the flush of the idle_worker, we must complete
+ * the switch-to-kernel-context, which requires a double
+ * pass through wait_for_idle: the first pass queues the switch,
+ * the second waits for it to complete.
+ */
+ if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ if (ret == 0 && val & DROP_IDLE)
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
@@ -3938,11 +3738,8 @@ i915_drop_caches_set(void *data, u64 val)
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_IDLE) {
- do {
- if (READ_ONCE(i915->gt.active_requests))
- flush_delayed_work(&i915->gt.retire_work);
- drain_delayed_work(&i915->gt.idle_work);
- } while (READ_ONCE(i915->gt.awake));
+ flush_delayed_work(&i915->gem.retire_work);
+ flush_work(&i915->gem.idle_work);
}
if (val & DROP_FREED)
@@ -3965,7 +3762,7 @@ i915_cache_sharing_get(void *data, u64 *val)
if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -3986,7 +3783,7 @@ i915_cache_sharing_set(void *data, u64 val)
return -EINVAL;
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
/* Update the cache sharing policy here as well */
@@ -4164,7 +3961,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
}
sseu->eu_total = sseu->eu_per_subslice *
- sseu_subslice_total(sseu);
+ intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
@@ -4188,10 +3985,10 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
seq_printf(m, " %s Slice Total: %u\n", type,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
- sseu_subslice_total(sseu));
+ intel_sseu_subslice_total(sseu));
for (s = 0; s < fls(sseu->slice_mask); s++) {
seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, hweight8(sseu->subslice_mask[s]));
+ s, intel_sseu_subslices_per_slice(sseu, s));
}
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
@@ -4232,7 +4029,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
sseu.max_eus_per_subslice =
RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
@@ -4255,7 +4052,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
+ file->private_data =
+ (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
@@ -4269,7 +4067,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(i915,
+ intel_runtime_pm_put(&i915->runtime_pm,
(intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
@@ -4570,8 +4368,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_gtt", i915_gem_gtt_info, 0},
- {"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
@@ -4648,23 +4444,17 @@ static const struct i915_debugfs_files {
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
- struct dentry *ent;
int i;
- ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
- minor->debugfs_root, to_i915(minor->dev),
- &i915_forcewake_fops);
- if (!ent)
- return -ENOMEM;
+ debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
+ to_i915(minor->dev), &i915_forcewake_fops);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
- ent = debugfs_create_file(i915_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root,
- to_i915(minor->dev),
- i915_debugfs_files[i].fops);
- if (!ent)
- return -ENOMEM;
+ debugfs_create_file(i915_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ i915_debugfs_files[i].fops);
}
return drm_debugfs_create_files(i915_debugfs_list,
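The debugfs_create_file() return value is no longer checked because the debugfs core copes with failed or error-pointer dentries internally, so registration cannot fail for this reason. A sketch of the simplified pattern (i915_example_fops is a hypothetical fops table):

        debugfs_create_file("i915_example", S_IRUSR, minor->debugfs_root,
                            to_i915(minor->dev), &i915_example_fops);
        /* No return value to check; debugfs handles failures internally. */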
@@ -4757,6 +4547,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
+ bool hdcp_cap, hdcp2_cap;
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4767,8 +4558,16 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
- seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
- "None" : "HDCP1.4");
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
seq_puts(m, "\n");
return 0;
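With this change the debugfs file reports HDCP 1.4 and HDCP 2.2 capability independently rather than collapsing both into a single string. Hypothetical example output for a sink supporting both versions:

        HDMI-A-1:87 HDCP version: HDCP1.4 HDCP2.2

with "None" printed only when neither probe succeeds.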
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
new file mode 100644
index 000000000000..c0cd22eb916d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_H__
+#define __I915_DEBUGFS_H__
+
+struct drm_i915_private;
+struct drm_connector;
+
+#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+int i915_debugfs_connector_add(struct drm_connector *connector);
+#else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+#endif
+
+#endif /* __I915_DEBUGFS_H__ */
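The new header follows the usual kernel pattern of static-inline no-op stubs when CONFIG_DEBUG_FS is disabled, so callers need no ifdefs. A sketch of a call site under that assumption:

        #include "i915_debugfs.h"

        /* Works whether or not CONFIG_DEBUG_FS is set; the stub returns 0. */
        ret = i915_debugfs_register(dev_priv);
        if (ret)
                DRM_ERROR("debugfs registration failed\n");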
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1ad88e6d7c04..f62e3397d936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,22 +47,35 @@
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_acpi.h"
+#include "display/intel_audio.h"
+#include "display/intel_bw.h"
+#include "display/intel_cdclk.h"
+#include "display/intel_dp.h"
+#include "display/intel_fbdev.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_overlay.h"
+#include "display/intel_pipe_crc.h"
+#include "display/intel_sprite.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_ioctls.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_workarounds.h"
+
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_pmu.h"
#include "i915_query.h"
-#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_audio.h"
-#include "intel_cdclk.h"
#include "intel_csr.h"
-#include "intel_dp.h"
#include "intel_drv.h"
-#include "intel_fbdev.h"
#include "intel_pm.h"
-#include "intel_sprite.h"
#include "intel_uc.h"
-#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -186,7 +199,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- return PCH_KBP;
+ /* KBP is SPT compatible */
+ return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
@@ -204,6 +218,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_MCC;
default:
return PCH_NONE;
}
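Since KBP now maps to PCH_SPT, code that special-cased PCH_KBP must test for PCH_SPT instead; the enum change below drops PCH_KBP accordingly. A sketch, assuming the existing HAS_PCH_SPT() helper (the callee is hypothetical):

        /* Covers both Sunrisepoint and Kaby Point after this change. */
        if (HAS_PCH_SPT(dev_priv))
                setup_spt_compatible_pch(dev_priv);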
@@ -231,7 +249,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
@@ -319,6 +339,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
drm_i915_getparam_t *param = data;
int value;
@@ -336,7 +357,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs;
+ value = dev_priv->ggtt.num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
@@ -372,12 +393,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
+ value = intel_sseu_subslice_total(sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
- value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
+ value = sseu->eu_total;
if (!value)
return -ENODEV;
break;
@@ -394,7 +415,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
- value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
+ value = sseu->min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
value = intel_huc_check_status(&dev_priv->huc);
@@ -433,6 +454,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CAPTURE:
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
+ case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -444,12 +466,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
- value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
+ value = sseu->slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
+ value = sseu->subslice_mask[0];
if (!value)
return -ENODEV;
break;
@@ -697,7 +719,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_csr;
- intel_setup_gmbus(dev_priv);
+ intel_gmbus_setup(dev_priv);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
@@ -727,12 +749,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
i915_gem_suspend(dev_priv);
+ i915_gem_fini_hw(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
- intel_teardown_gmbus(dev_priv);
+ intel_gmbus_teardown(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@@ -884,13 +907,16 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->sb_lock);
+ pm_qos_add_request(&dev_priv->sb_qos,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
- intel_runtime_pm_init_early(dev_priv);
+ intel_runtime_pm_init_early(&dev_priv->runtime_pm);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
@@ -943,6 +969,9 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_cleanup_early(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_engines_cleanup(dev_priv);
+
+ pm_qos_remove_request(&dev_priv->sb_qos);
+ mutex_destroy(&dev_priv->sb_lock);
}
/**
@@ -1603,7 +1632,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
intel_gt_init_workarounds(dev_priv);
- i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1640,6 +1668,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
*/
intel_get_dram_info(dev_priv);
+ intel_bw_init_hw(dev_priv);
return 0;
@@ -1668,7 +1697,6 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
- i915_ggtt_cleanup_hw(dev_priv);
}
/**
@@ -1730,7 +1758,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
- intel_runtime_pm_enable(dev_priv);
+ intel_runtime_pm_enable(&dev_priv->runtime_pm);
}
/**
@@ -1739,7 +1767,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_disable(dev_priv);
+ intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
intel_fbdev_unregister(dev_priv);
@@ -1760,7 +1788,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
- drm_dev_unregister(&dev_priv->drm);
+ drm_dev_unplug(&dev_priv->drm);
i915_gem_shrinker_unregister(dev_priv);
}
@@ -1868,7 +1896,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_pci_disable;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = i915_driver_init_mmio(dev_priv);
if (ret < 0)
@@ -1884,7 +1912,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_driver_register(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_welcome_messages(dev_priv);
@@ -1892,10 +1920,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
out_cleanup_hw:
i915_driver_cleanup_hw(dev_priv);
+ i915_ggtt_cleanup_hw(dev_priv);
out_cleanup_mmio:
i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
@@ -1910,7 +1939,7 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_driver_unregister(dev_priv);
@@ -1943,20 +1972,29 @@ void i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_reset_error_state(dev_priv);
- i915_gem_fini(dev_priv);
+ i915_gem_fini_hw(dev_priv);
intel_power_domains_fini_hw(dev_priv);
i915_driver_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
- intel_runtime_pm_cleanup(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ i915_gem_fini(dev_priv);
+
+ i915_ggtt_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
+
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_cleanup(rpm);
i915_driver_cleanup_early(dev_priv);
i915_driver_destroy(dev_priv);
@@ -2050,7 +2088,7 @@ static int i915_drm_suspend(struct drm_device *dev)
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
@@ -2084,7 +2122,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
}
@@ -2105,9 +2143,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(rpm);
i915_gem_suspend_late(dev_priv);
@@ -2148,9 +2187,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
pci_set_power_state(pdev, PCI_D3hot);
out:
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
if (!dev_priv->uncore.user_forcewake.count)
- intel_runtime_pm_cleanup(dev_priv);
+ intel_runtime_pm_cleanup(rpm);
return ret;
}
@@ -2184,7 +2223,7 @@ static int i915_drm_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
intel_sanitize_gt_powersave(dev_priv);
i915_gem_sanitize(dev_priv);
@@ -2244,7 +2283,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_power_domains_enable(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
}
@@ -2299,7 +2338,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
@@ -2322,9 +2361,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
- intel_engines_sanitize(dev_priv, true);
+ intel_gt_sanitize(dev_priv, true);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2677,7 +2716,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
-static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
u32 mask, u32 val)
{
i915_reg_t reg = VLV_GTLC_PW_STATUS;
@@ -2691,7 +2730,9 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
* Transitioning between RC6 states should be at most 2ms (see
* valleyview_enable_rps) so use a 3ms timeout.
*/
- ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
+ ret = wait_for(((reg_value =
+ intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+ == val, 3);
/* just trace the final value */
trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
@@ -2857,6 +2898,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
@@ -2867,7 +2909,7 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(rpm);
/*
* We are safe here against re-faults, since the fault handler takes
@@ -2875,7 +2917,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_suspend(dev_priv);
+ intel_uc_runtime_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -2905,18 +2947,18 @@ static int intel_runtime_suspend(struct device *kdev)
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
return ret;
}
- enable_rpm_wakeref_asserts(dev_priv);
- intel_runtime_pm_cleanup(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_cleanup(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
- dev_priv->runtime_pm.suspended = true;
+ rpm->suspended = true;
/*
* FIXME: We really should find a document that references the arguments
@@ -2955,6 +2997,7 @@ static int intel_runtime_resume(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2962,11 +3005,11 @@ static int intel_runtime_resume(struct device *kdev)
DRM_DEBUG_KMS("Resuming device\n");
- WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
- disable_rpm_wakeref_asserts(dev_priv);
+ WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+ disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- dev_priv->runtime_pm.suspended = false;
+ rpm->suspended = false;
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -3016,7 +3059,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_enable_ipc(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -3098,7 +3141,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -3111,13 +3154,13 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
@@ -3136,7 +3179,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
- DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
@@ -3148,6 +3191,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 066fd2a12851..bc909ec5d9c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
@@ -62,23 +63,27 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#include "intel_bios.h"
+#include "display/intel_bios.h"
+#include "display/intel_display.h"
+#include "display/intel_display_power.h"
+#include "display/intel_dpll_mgr.h"
+#include "display/intel_frontbuffer.h"
+#include "display/intel_opregion.h"
+
+#include "gt/intel_lrc.h"
+#include "gt/intel_engine.h"
+#include "gt/intel_workarounds.h"
+
#include "intel_device_info.h"
-#include "intel_display.h"
-#include "intel_dpll_mgr.h"
-#include "intel_frontbuffer.h"
-#include "intel_lrc.h"
-#include "intel_opregion.h"
-#include "intel_ringbuffer.h"
+#include "intel_runtime_pm.h"
#include "intel_uc.h"
#include "intel_uncore.h"
+#include "intel_wakeref.h"
#include "intel_wopcm.h"
-#include "intel_workarounds.h"
#include "i915_gem.h"
-#include "i915_gem_context.h"
+#include "gem/i915_gem_context_types.h"
#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
@@ -93,8 +98,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190417"
-#define DRIVER_TIMESTAMP 1555492067
+#define DRIVER_DATE "20190619"
+#define DRIVER_TIMESTAMP 1560947544
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -133,7 +138,7 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-typedef depot_stack_handle_t intel_wakeref_t;
+struct drm_i915_gem_object;
enum hpd_pin {
HPD_NONE = 0,
@@ -210,12 +215,6 @@ struct drm_i915_file_private {
struct {
spinlock_t lock;
struct list_head request_list;
-/* 20ms is a fairly arbitrary limit (greater than the average frame time)
- * chosen to prevent the CPU getting more than a frame ahead of the GPU
- * (when using lax throttling for the frontbuffer). We also use it to
- * offer free GPU waitboosts for severely congested workloads.
- */
-#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
struct idr context_idr;
@@ -297,7 +296,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -342,12 +341,9 @@ struct drm_i915_display_funcs {
* involved with the same commit.
*/
void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ void (*read_luts)(struct intel_crtc_state *crtc_state);
};
-#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
-#define CSR_VERSION_MAJOR(version) ((version) >> 16)
-#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
-
struct intel_csr {
struct work_struct work;
const char *fw_path;
@@ -357,8 +353,8 @@ struct intel_csr {
u32 dmc_fw_size; /* dwords */
u32 version;
u32 mmio_count;
- i915_reg_t mmioaddr[8];
- u32 mmiodata[8];
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
u32 dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
@@ -535,15 +531,10 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
- PCH_SPT, /* Sunrisepoint PCH */
- PCH_KBP, /* Kaby Lake PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
-};
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
+ PCH_MCC, /* Mule Creek Canyon PCH */
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -648,6 +639,8 @@ struct intel_rps_ei {
};
struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
@@ -739,111 +732,6 @@ struct intel_ilk_power_mgmt {
int r_t;
};
-struct drm_i915_private;
-struct i915_power_well;
-
-struct i915_power_well_ops {
- /*
- * Synchronize the well's hw state to match the current sw state, for
- * example enable/disable it based on the current refcount. Called
- * during driver init and resume time, possibly after first calling
- * the enable/disable handlers.
- */
- void (*sync_hw)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Enable the well and resources that depend on it (for example
- * interrupts located on the well). Called after the 0->1 refcount
- * transition.
- */
- void (*enable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Disable the well and resources that depend on it. Called after
- * the 1->0 refcount transition.
- */
- void (*disable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
-};
-
-struct i915_power_well_regs {
- i915_reg_t bios;
- i915_reg_t driver;
- i915_reg_t kvmr;
- i915_reg_t debug;
-};
-
-/* Power well structure for haswell */
-struct i915_power_well_desc {
- const char *name;
- bool always_on;
- u64 domains;
- /* unique identifier for this power well */
- enum i915_power_well_id id;
- /*
- * Arbitrary data associated with this power well. Platform and power
- * well specific.
- */
- union {
- struct {
- /*
- * request/status flag index in the PUNIT power well
- * control/status registers.
- */
- u8 idx;
- } vlv;
- struct {
- enum dpio_phy phy;
- } bxt;
- struct {
- const struct i915_power_well_regs *regs;
- /*
- * request/status flag index in the power well
- * control/status registers.
- */
- u8 idx;
- /* Mask of pipes whose IRQ logic is backed by the pw */
- u8 irq_pipe_mask;
- /* The pw is backing the VGA functionality */
- bool has_vga:1;
- bool has_fuses:1;
- /*
- * The pw is for an ICL+ TypeC PHY port in
- * Thunderbolt mode.
- */
- bool is_tc_tbt:1;
- } hsw;
- };
- const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_well {
- const struct i915_power_well_desc *desc;
- /* power well enable/disable usage count */
- int count;
- /* cached hw enabled state */
- bool hw_enabled;
-};
-
-struct i915_power_domains {
- /*
- * Power wells needed for initialization at driver init and suspend
- * time are on. They are kept on until after the first modeset.
- */
- bool initializing;
- bool display_core_suspended;
- int power_well_count;
-
- intel_wakeref_t wakeref;
-
- struct mutex lock;
- int domain_use_count[POWER_DOMAIN_NUM];
- struct i915_power_well *power_wells;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -861,20 +749,15 @@ struct i915_gem_mm {
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
spinlock_t obj_lock;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head bound_list;
/**
- * List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU). These objects may or may
- * not actually have any pages attached.
+ * List of objects which are purgeable.
*/
- struct list_head unbound_list;
+ struct list_head purge_list;
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
+ /**
+ * List of objects which have allocated pages and are shrinkable.
*/
- struct list_head userfault_list;
+ struct list_head shrink_list;
/**
* List of objects which are pending destruction.
@@ -899,15 +782,12 @@ struct i915_gem_mm {
struct vfsmount *gemfs;
/** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
+ struct i915_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
@@ -925,10 +805,9 @@ struct i915_gem_mm {
/** Bit 6 swizzling required for Y tiling */
u32 bit_6_swizzle_y;
- /* accounting, useful for userland debugging */
- spinlock_t object_stat_lock;
- u64 object_memory;
- u32 object_count;
+ /* shrinker accounting, also useful for userland debugging */
+ u64 shrink_memory;
+ u32 shrink_count;
};
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
@@ -942,6 +821,9 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
struct ddi_vbt_port_info {
+ /* Non-NULL if port present. */
+ const struct child_device_config *child;
+
int max_tmds_clock;
/*
@@ -952,7 +834,6 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
- u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@@ -1154,54 +1035,6 @@ struct skl_wm_params {
u32 dbuf_block_size;
};
-/*
- * This struct helps tracking the state needed for runtime PM, which puts the
- * device in PCI D3 state. Notice that when this happens, nothing on the
- * graphics device works, even register access, so we don't get interrupts nor
- * anything else.
- *
- * Every piece of our code that needs to actually touch the hardware needs to
- * either call intel_runtime_pm_get or call intel_display_power_get with the
- * appropriate power domain.
- *
- * Our driver uses the autosuspend delay feature, which means we'll only really
- * suspend if the refcount stays at zero for a certain amount of time. The
- * default value is currently very conservative (see intel_runtime_pm_enable), but
- * it can be changed with the standard runtime PM files from sysfs.
- *
- * The irqs_enabled variable becomes false exactly after we disable the IRQs and
- * goes back to true exactly before we reenable the IRQs. We use this variable
- * to check if someone is trying to enable/disable IRQs while they're supposed
- * to be disabled. This shouldn't happen and we'll print some error messages in
- * case it happens.
- *
- * For more, read the Documentation/power/runtime_pm.txt.
- */
-struct i915_runtime_pm {
- atomic_t wakeref_count;
- bool suspended;
- bool irqs_enabled;
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- /*
- * To aid detection of wakeref leaks and general misuse, we
- * track all wakeref holders. With manual markup (i.e. returning
- * a cookie to each rpm_get caller which they then supply to their
- * paired rpm_put) we can remove corresponding pairs and keep
- * the array trimmed to active wakerefs.
- */
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
-#endif
-};
-
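The comment above still describes the get/put discipline after the rename to
intel_runtime_pm. A minimal sketch of a caller follows; the helper function is
invented for illustration, while intel_runtime_pm_get()/put() and the
intel_wakeref_t cookie are the real interfaces:

	static u32 read_hw_counter(struct drm_i915_private *i915, i915_reg_t reg)
	{
		intel_wakeref_t wakeref;
		u32 val;

		/* Wake the device out of D3; pairs with the put below. */
		wakeref = intel_runtime_pm_get(&i915->runtime_pm);
		val = intel_uncore_read(&i915->uncore, reg);
		/* Autosuspend only starts once the refcount drops to zero. */
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

		return val;
	}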
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1561,6 +1394,7 @@ struct drm_i915_private {
/* Sideband mailbox protection */
struct mutex sb_lock;
+ struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
union {
@@ -1594,9 +1428,6 @@ struct drm_i915_private {
/* protects panel power sequencer state */
struct mutex pps_mutex;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
unsigned int max_cdclk_freq;
@@ -1709,14 +1540,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /*
- * Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested. Note that
- * this lock may be held for long periods of time when
- * talking to hw - so only take it when talking to hw!
- */
- struct mutex pcu_lock;
-
/* gen6+ GT PM state */
struct intel_gen6_power_mgmt gt_pm;
@@ -1850,7 +1673,14 @@ struct drm_i915_private {
} type;
} dram_info;
- struct i915_runtime_pm runtime_pm;
+ struct intel_bw_info {
+ int num_planes;
+ int deratedbw[3];
+ } max_bw[6];
+
+ struct drm_private_obj bw_obj;
+
+ struct intel_runtime_pm runtime_pm;
struct {
bool initialized;
@@ -1995,8 +1825,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- void (*cleanup_engine)(struct intel_engine_cs *engine);
-
struct i915_gt_timelines {
struct mutex mutex; /* protects list, tainted by GPU */
struct list_head active_list;
@@ -2006,10 +1834,12 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
- intel_engine_mask_t active_engines;
struct list_head active_rings;
+
+ struct intel_wakeref wakeref;
+
struct list_head closed_vma;
- u32 active_requests;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
/**
* Is the GPU currently considered idle, or busy executing
@@ -2020,6 +1850,16 @@ struct drm_i915_private {
*/
intel_wakeref_t awake;
+ struct blocking_notifier_head pm_notifications;
+
+ ktime_t last_init_time;
+
+ struct i915_vma *scratch;
+ } gt;
+
+ struct {
+ struct notifier_block pm_notifier;
+
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
@@ -2036,12 +1876,8 @@ struct drm_i915_private {
* arrive within a small period of time, we fire
* off the idle_work.
*/
- struct delayed_work idle_work;
-
- ktime_t last_init_time;
-
- struct i915_vma *scratch;
- } gt;
+ struct work_struct idle_work;
+ } gem;
/* For i945gm vblank irq vs. C3 workaround */
struct {
@@ -2162,111 +1998,6 @@ enum hdmi_force_audio {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-/*
- * Optimised SGL iterator for GEM objects
- */
-static __always_inline struct sgt_iter {
- struct scatterlist *sgp;
- union {
- unsigned long pfn;
- dma_addr_t dma;
- };
- unsigned int curr;
- unsigned int max;
-} __sgt_iter(struct scatterlist *sgl, bool dma) {
- struct sgt_iter s = { .sgp = sgl };
-
- if (s.sgp) {
- s.max = s.curr = s.sgp->offset;
- s.max += s.sgp->length;
- if (dma)
- s.dma = sg_dma_address(s.sgp);
- else
- s.pfn = page_to_pfn(sg_page(s.sgp));
- }
-
- return s;
-}
-
-static inline struct scatterlist *____sg_next(struct scatterlist *sg)
-{
- ++sg;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
- return sg;
-}
-
-/**
- * __sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * If the entry is the last, return NULL; otherwise, step to the next
- * element in the array (@sg + 1). If that's a chain pointer, follow it;
- * otherwise just return the pointer to the next element.
- **/
-static inline struct scatterlist *__sg_next(struct scatterlist *sg)
-{
- return sg_is_last(sg) ? NULL : ____sg_next(sg);
-}
-
-/**
- * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
- * @__iter: 'struct sgt_iter' (iterator state, internal)
- * @__sgt: sg_table to iterate over (input)
- */
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
- (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
-
-/**
- * for_each_sgt_page - iterate over the pages of the given sg_table
- * @__pp: page pointer (output)
- * @__iter: 'struct sgt_iter' (iterator state, internal)
- * @__sgt: sg_table to iterate over (input)
- */
-#define for_each_sgt_page(__pp, __iter, __sgt) \
- for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
- ((__pp) = (__iter).pfn == 0 ? NULL : \
- pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
- (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
-
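A typical user of the iterators above looks like the following sketch, where
the object is assumed to already hold a pages pin and insert_pte() is a
hypothetical per-page callback:

	struct sgt_iter iter;
	dma_addr_t dma;

	/* Visit each I915_GTT_PAGE_SIZE chunk of the backing store. */
	for_each_sgt_dma(dma, iter, obj->mm.pages)
		insert_pte(ppgtt, dma);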
-bool i915_sg_trim(struct sg_table *orig_st);
-
-static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
-{
- unsigned int page_sizes;
-
- page_sizes = 0;
- while (sg) {
- GEM_BUG_ON(sg->offset);
- GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
- page_sizes |= sg->length;
- sg = __sg_next(sg);
- }
-
- return page_sizes;
-}
-
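Note that i915_sg_page_sizes() deliberately ORs together the raw segment
lengths: an object backed by, say, one 2 MiB segment and one 4 KiB segment
yields SZ_2M | SZ_4K, letting the GTT code later pick the largest page size
each chunk supports.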
-static inline unsigned int i915_sg_segment_size(void)
-{
- unsigned int size = swiotlb_max_segment();
-
- if (size == 0)
- return SCATTERLIST_MAX_SEGMENT;
-
- size = rounddown(size, PAGE_SIZE);
-	/* swiotlb_max_segment() can return 1 byte when it means one page. */
- if (size < PAGE_SIZE)
- size = PAGE_SIZE;
-
- return size;
-}
-
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
@@ -2420,9 +2151,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_AML_ULX(dev_priv) \
- (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
- IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2435,6 +2163,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CFL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
@@ -2444,8 +2174,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
-#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
-
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
#define SKL_REVID_C0 0x2
@@ -2585,6 +2313,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
+#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
+
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
@@ -2598,7 +2328,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* properties, so we have separate macros to test them.
*/
#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
@@ -2628,15 +2357,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
@@ -2708,29 +2438,10 @@ extern void i915_driver_unload(struct drm_device *dev);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
-extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
-int intel_engines_init(struct drm_i915_private *dev_priv);
-
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
-/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 pin_mask, u32 long_mask);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
- enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-
-/* i915_irq.c */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
unsigned long delay;
@@ -2748,11 +2459,6 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
-int intel_irq_install(struct drm_i915_private *dev_priv);
-void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2763,120 +2469,15 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask);
-
-void
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask);
-
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- u32 mask,
- u32 bits);
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, bits);
-}
-static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, 0);
-}
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
-}
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
-}
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, bits);
-}
-static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, 0);
-}
-
/* i915_gem.c */
-int i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
-struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size);
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
if (!atomic_read(&i915->mm.free_count))
@@ -2903,15 +2504,15 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
* grace period so that we catch work queued via RCU from the first
* pass. As neither drain_workqueue() nor flush_workqueue() reports
* a result, we assume that we need no more
- * than 2 passes to catch all recursive RCU delayed work.
+ * than 3 passes to catch all _recursive_ RCU delayed work.
*
*/
- int pass = 2;
+ int pass = 3;
do {
rcu_barrier();
i915_gem_drain_freed_objects(i915);
- drain_workqueue(i915->wq);
} while (--pass);
+ drain_workqueue(i915->wq);
}
struct i915_vma * __must_check
@@ -2922,160 +2523,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(const struct scatterlist *sg)
-{
- return sg->length >> PAGE_SHIFT;
-}
-
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- unsigned int sg_page_sizes);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- might_lock(&obj->mm.lock);
-
- if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
- return 0;
-
- return __i915_gem_object_get_pages(obj);
-}
-
-static inline bool
-i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
-{
- return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
- atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
- return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- __i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
-static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
-{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
-}
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type);
-
-void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
- unsigned long offset,
- unsigned long size);
-static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
-{
- __i915_gem_object_flush_map(obj, 0, obj->base.size);
-}
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
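Taken together, the pin_map/flush_map/unpin_map contract documented above is
used roughly as follows (a sketch with error handling trimmed; obj is assumed
to be a valid shmem-backed object):

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);	/* CPU access via the mapping */
	i915_gem_object_flush_map(obj);		/* flush before handing to the GPU */
	i915_gem_object_unpin_map(obj);		/* pin released; map may be reaped */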
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush);
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE BIT(0)
-#define CLFLUSH_AFTER BIT(1)
-#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
-
-static inline void
-i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
@@ -3123,36 +2573,15 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
void i915_gem_fini(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
-int i915_gem_object_wait(struct drm_i915_gem_object *obj,
- unsigned int flags,
- long timeout);
-int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
- unsigned int flags,
- const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
-
-int __must_check
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-struct i915_vma * __must_check
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
- const struct i915_ggtt_view *view,
- unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
- int align);
+
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -3165,25 +2594,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-static inline struct i915_hw_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
- return container_of(vm, struct i915_hw_ppgtt, vm);
-}
-
-/* i915_gem_fence_reg.c */
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv);
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-
-void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
@@ -3266,11 +2676,12 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
-#define I915_SHRINK_ACTIVE 0x8
-#define I915_SHRINK_VMAPS 0x10
+#define I915_SHRINK_UNBOUND BIT(0)
+#define I915_SHRINK_BOUND BIT(1)
+#define I915_SHRINK_ACTIVE BIT(2)
+#define I915_SHRINK_VMAPS BIT(3)
+#define I915_SHRINK_WRITEBACK BIT(4)
+
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
@@ -3291,18 +2702,6 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
unsigned int tiling, unsigned int stride);
-/* i915_debugfs.c */
-#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
-#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
-static inline int i915_debugfs_connector_add(struct drm_connector *connector)
-{ return 0; }
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
-#endif
-
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3330,56 +2729,6 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv);
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
-/* intel_lpe_audio.c */
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- const void *eld, int ls_clock, bool dp_output);
-
-/* intel_i2c.c */
-extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
-extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
-extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
- unsigned int pin);
-extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
-
-extern struct i2c_adapter *
-intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
-extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
-extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
-static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
-{
- return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
-}
-extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
-
-/* intel_bios.c */
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
- enum port port);
-bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
-
-/* intel_acpi.c */
-#ifdef CONFIG_ACPI
-extern void intel_register_dsm_handler(void);
-extern void intel_unregister_dsm_handler(void);
-#else
-static inline void intel_register_dsm_handler(void) { return; }
-static inline void intel_unregister_dsm_handler(void) { return; }
-#endif /* CONFIG_ACPI */
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -3387,20 +2736,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-static inline struct intel_sseu
-intel_device_default_sseu(struct drm_i915_private *i915)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- struct intel_sseu value = {
- .slice_mask = sseu->slice_mask,
- .subslice_mask = sseu->subslice_mask[0],
- .min_eus_per_subslice = sseu->max_eus_per_subslice,
- .max_eus_per_subslice = sseu->max_eus_per_subslice,
- };
-
- return value;
-}
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
@@ -3410,158 +2745,23 @@ extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
- bool interactive);
-extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
- bool enable);
-void intel_dsc_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* overlay */
-extern struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_overlay_error_state *error);
-
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
- u32 val, int fast_timeout_us,
- int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val) \
- sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
-/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-
-/* intel_dpio_phy.c */
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
- enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
-
-void chv_set_phy_signal_level(struct intel_encoder *encoder,
- u32 deemph_reg_value, u32 margin_reg_value,
- bool uniq_trans_scale);
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state);
-
-void vlv_set_phy_signal_level(struct intel_encoder *encoder,
- u32 demph_reg_value, u32 preemph_reg_value,
- u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state);
-
-/* intel_combo_phy.c */
-void icl_combo_phys_init(struct drm_i915_private *dev_priv);
-void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
-void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
-void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
-#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
-
-#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
-#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
-#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
-#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
-
#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
-#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
-
-/* Be very careful with read/write 64-bit values. On 32-bit machines, they
- * will be implemented using 2 32-bit writes in an arbitrary order with
- * an arbitrary delay between them. This can cause the hardware to
- * act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * dev_priv->uncore.funcs.mmio_writeq.
- *
- * When reading a 64-bit value as two 32-bit values, the delay may cause
- * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
- * occasionally a 64-bit register does not actually support a full readq
- * and must be read using two 32-bit reads.
- *
- * You have been warned.
- */
-#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
-#define I915_READ64_2x32(lower_reg__, upper_reg__) \
- __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
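The 2x32 variant exists precisely because of the tearing hazard described
above: it reads the upper half, then the lower, then re-reads the upper half,
retrying (a bounded number of times) until the upper half is stable across the
pair. A sketch of the intended use; the register names are assumptions for
illustration, not taken from this patch:

	/* Sample a 64-bit timestamp exposed as two 32-bit registers. */
	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(RENDER_RING_BASE),
				  RING_TIMESTAMP_UDW(RENDER_RING_BASE));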
#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3591,68 +2791,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
*/
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
- else
- return VGACNTRL;
-}
-
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
- unsigned long j = msecs_to_jiffies(m);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
- /* nsecs_to_jiffies64() does not guard against overflow */
- if (NSEC_PER_SEC % HZ &&
- div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
- return MAX_JIFFY_OFFSET;
-
- return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
- unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
- /*
- * Don't re-read the value of "jiffies" every time since it may change
- * behind our back and break the math.
- */
- tmp_jiffies = jiffies;
- target_jiffies = timestamp_jiffies +
- msecs_to_jiffies_timeout(to_wait_ms);
-
- if (time_after(target_jiffies, tmp_jiffies)) {
- remaining_jiffies = target_jiffies - tmp_jiffies;
- while (remaining_jiffies)
- remaining_jiffies =
- schedule_timeout_uninterruptible(remaining_jiffies);
- }
-}
-
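Concretely, for the A/B pattern the comment above describes (a sketch; the
panel power-cycle example and the 500 ms figure are illustrative only):

	unsigned long panel_off_jiffies;

	panel_off_jiffies = jiffies;	/* event A: panel powered off */

	/* ... unrelated work may run here ... */

	/* event B: wait out the remainder of the required delay */
	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);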
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
@@ -3690,4 +2834,21 @@ static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
return i915_ggtt_offset(i915->gt.scratch);
}
+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+ return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
+static inline void add_taint_for_CI(unsigned int taint)
+{
+ /*
+ * The system is "ok", just about surviving for the user, but
+ * CI results are now unreliable as the HW is very suspect.
+ * CI checks the taint state after every test and will reboot
+ * the machine if the kernel is tainted.
+ */
+ add_taint(taint, LOCKDEP_STILL_OK);
+}
+
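A representative call site, as an assumption about usage rather than anything
this patch adds (the reset-failure check is hypothetical):

	if (!gpu_reset_succeeded(i915))	/* device left in a suspect state */
		add_taint_for_CI(TAINT_WARN);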
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 591dd89ba7af..6621595fe74c 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -71,7 +71,7 @@ static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
u64 tmp;
- tmp = (u64)val * mul.val;
+ tmp = mul_u32_u32(val, mul.val);
tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
WARN_ON(tmp > U32_MAX);
@@ -83,7 +83,7 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
{
u64 tmp;
- tmp = (u64)val.val * mul.val;
+ tmp = mul_u32_u32(val.val, mul.val);
tmp = tmp >> 16;
return clamp_u64_to_fixed16(tmp);
@@ -114,7 +114,7 @@ static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul
{
u64 tmp;
- tmp = (u64)val * mul.val;
+ tmp = mul_u32_u32(val, mul.val);
return clamp_u64_to_fixed16(tmp);
}
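As a worked example of the 16.16 arithmetic these helpers implement: 2.5 is
encoded as 0x00028000 and 3.0 as 0x00030000; mul_u32_u32() produces the full
64-bit product 0x780000000, and shifting right by 16 gives 0x00078000, i.e.
7.5. The switch from the plain (u64) cast to mul_u32_u32() keeps the result
identical; it simply helps 32-bit builds emit a single 32x32->64 widening
multiply rather than a full 64x64 one.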
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad01c92aaf74..190ad54fb072 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
@@ -39,32 +38,27 @@
#include <linux/dma-buf.h>
#include <linux/mman.h>
+#include "display/intel_display.h"
+#include "display/intel_frontbuffer.h"
+
+#include "gem/i915_gem_clflush.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/i915_gemfs.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_mocs.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_workarounds.h"
+
#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_gemfs.h"
-#include "i915_globals.h"
-#include "i915_reset.h"
+#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_mocs.h"
#include "intel_pm.h"
-#include "intel_workarounds.h"
-
-static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
-
-static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- if (obj->cache_dirty)
- return false;
-
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
- return obj->pin_global; /* currently in use by HW, keep flushed */
-}
static int
insert_mappable_node(struct i915_ggtt *ggtt,
@@ -83,124 +77,6 @@ remove_mappable_node(struct drm_mm_node *node)
drm_mm_remove_node(node);
}
-/* some bookkeeping */
-static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count++;
- dev_priv->mm.object_memory += size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
-static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count--;
- dev_priv->mm.object_memory -= size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
-static void __i915_gem_park(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915->gt.active_requests);
- GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
-
- if (!i915->gt.awake)
- return;
-
- /*
- * Be paranoid and flush a concurrent interrupt to make sure
- * we don't reactivate any irq tasklets after parking.
- *
- * FIXME: Note that even though we have waited for execlists to be idle,
- * there may still be an in-flight interrupt even though the CSB
- * is now empty. synchronize_irq() makes sure that a residual interrupt
- * is completed before we continue, but it doesn't prevent the HW from
- * raising a spurious interrupt later. To complete the shield we should
- * coordinate disabling the CS irq with flushing the interrupts.
- */
- synchronize_irq(i915->drm.irq);
-
- intel_engines_park(i915);
- i915_timelines_park(i915);
-
- i915_pmu_gt_parked(i915);
- i915_vma_parked(i915);
-
- wakeref = fetch_and_zero(&i915->gt.awake);
- GEM_BUG_ON(!wakeref);
-
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
-
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
-
- i915_globals_park();
-}
-
-void i915_gem_park(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915->gt.active_requests);
-
- if (!i915->gt.awake)
- return;
-
- /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
- mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
-}
-
-void i915_gem_unpark(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915->gt.active_requests);
- assert_rpm_wakelock_held(i915);
-
- if (i915->gt.awake)
- return;
-
- /*
- * It seems that the DMC likes to transition between the DC states a lot
- * when there are no connected displays (no active power domains) during
- * command submission.
- *
-	 * This activity has a negative impact on the performance of the chip with
- * huge latencies observed in the interrupt handler and elsewhere.
- *
- * Work around it by grabbing a GT IRQ power domain whilst there is any
- * GT activity, preventing any DC state transitions.
- */
- i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
- GEM_BUG_ON(!i915->gt.awake);
-
- i915_globals_unpark();
-
- intel_enable_gt_powersave(i915);
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
- i915_pmu_gt_unparked(i915);
-
- intel_engines_unpark(i915);
-
- i915_queue_hangcheck(i915);
-
- queue_delayed_work(i915->wq,
- &i915->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -225,178 +101,14 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping = obj->base.filp->f_mapping;
- drm_dma_handle_t *phys;
- struct sg_table *st;
- struct scatterlist *sg;
- char *vaddr;
- int i;
- int err;
-
- if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
-
-	/* Always aligning to the object size allows a single allocation
- * to handle all possible callers, and given typical object sizes,
- * the alignment of the buddy allocation will naturally match.
- */
- phys = drm_pci_alloc(obj->base.dev,
- roundup_pow_of_two(obj->base.size),
- roundup_pow_of_two(obj->base.size));
- if (!phys)
- return -ENOMEM;
-
- vaddr = phys->vaddr;
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto err_phys;
- }
-
- src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- kunmap_atomic(src);
-
- put_page(page);
- vaddr += PAGE_SIZE;
- }
-
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st) {
- err = -ENOMEM;
- goto err_phys;
- }
-
- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
- kfree(st);
- err = -ENOMEM;
- goto err_phys;
- }
-
- sg = st->sgl;
- sg->offset = 0;
- sg->length = obj->base.size;
-
- sg_dma_address(sg) = phys->busaddr;
- sg_dma_len(sg) = obj->base.size;
-
- obj->phys_handle = phys;
-
- __i915_gem_object_set_pages(obj, st, sg->length);
-
- return 0;
-
-err_phys:
- drm_pci_free(obj->base.dev, phys);
-
- return err;
-}
-
-static void __start_cpu_write(struct drm_i915_gem_object *obj)
-{
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- if (cpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
-}
-
-void
-__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- bool needs_clflush)
-{
- GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
- if (obj->mm.madv == I915_MADV_DONTNEED)
- obj->mm.dirty = false;
-
- if (needs_clflush &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- drm_clflush_sg(pages);
-
- __start_cpu_write(obj);
-}
-
-static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- __i915_gem_object_release_shmem(obj, pages, false);
-
- if (obj->mm.dirty) {
- struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
- int i;
-
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *dst;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- continue;
-
- dst = kmap_atomic(page);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- memcpy(dst, vaddr, PAGE_SIZE);
- kunmap_atomic(dst);
-
- set_page_dirty(page);
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
- put_page(page);
- vaddr += PAGE_SIZE;
- }
- obj->mm.dirty = false;
- }
-
- sg_free_table(pages);
- kfree(pages);
-
- drm_pci_free(obj->base.dev, obj->phys_handle);
-}
-
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
- .get_pages = i915_gem_object_get_pages_phys,
- .put_pages = i915_gem_object_put_pages_phys,
- .release = i915_gem_object_release_phys,
-};
-
-static const struct drm_i915_gem_object_ops i915_gem_object_ops;
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
- int ret;
+ int ret = 0;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- /* Closed vma are removed from the obj->vma_list - but they may
- * still have an active binding on the object. To remove those we
- * must wait for all rendering to complete to the object (as unbinding
- * must anyway), and retire the requests.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, false);
- if (ret)
- return ret;
-
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
@@ -414,190 +126,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-static long
-i915_gem_object_wait_fence(struct dma_fence *fence,
- unsigned int flags,
- long timeout)
-{
- struct i915_request *rq;
-
- BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
-
- if (!dma_fence_is_i915(fence))
- return dma_fence_wait_timeout(fence,
- flags & I915_WAIT_INTERRUPTIBLE,
- timeout);
-
- rq = to_request(fence);
- if (i915_request_completed(rq))
- goto out;
-
- timeout = i915_request_wait(rq, flags, timeout);
-
-out:
- if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
- i915_request_retire_upto(rq);
-
- return timeout;
-}
-
-static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
- unsigned int flags,
- long timeout)
-{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
- struct dma_fence *excl;
- bool prune_fences = false;
-
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = reservation_object_get_fences_rcu(resv,
- &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout);
- if (timeout < 0)
- break;
-
- dma_fence_put(shared[i]);
- }
-
- for (; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
-
- /*
- * If both shared fences and an exclusive fence exist,
- * then by construction the shared fences must be later
- * than the exclusive fence. If we successfully wait for
- * all the shared fences, we know that the exclusive fence
-	 * must also be signaled. If all the shared fences are
- * signaled, we can prune the array and recover the
- * floating references on the fences/requests.
- */
- prune_fences = count && timeout >= 0;
- } else {
- excl = reservation_object_get_excl_rcu(resv);
- }
-
- if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
- dma_fence_put(excl);
-
- /*
- * Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
- */
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
- }
-
- return timeout;
-}
-
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
-{
- struct i915_request *rq;
- struct intel_engine_cs *engine;
-
- if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
- return;
-
- rq = to_request(fence);
- engine = rq->engine;
-
- local_bh_disable();
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule)
- engine->schedule(rq, attr);
- rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
-}
-
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
-{
- /* Recurse once into a fence-array */
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
- int i;
-
- for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
- } else {
- __fence_set_priority(fence, attr);
- }
-}
-
-int
-i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
- unsigned int flags,
- const struct i915_sched_attr *attr)
-{
- struct dma_fence *excl;
-
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = reservation_object_get_fences_rcu(obj->resv,
- &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
- dma_fence_put(shared[i]);
- }
-
- kfree(shared);
- } else {
- excl = reservation_object_get_excl_rcu(obj->resv);
- }
-
- if (excl) {
- fence_set_priority(excl, attr);
- dma_fence_put(excl);
- }
- return 0;
-}
-
-/**
- * Waits for rendering to the object to be completed
- * @obj: i915 gem object
- * @flags: how to wait (under a lock, for all rendering or just for writes etc)
- * @timeout: how long to wait
- */
-int
-i915_gem_object_wait(struct drm_i915_gem_object *obj,
- unsigned int flags,
- long timeout)
-{
- might_sleep();
- GEM_BUG_ON(timeout < 0);
-
- timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
- return timeout < 0 ? timeout : 0;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -636,7 +164,7 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -656,19 +184,36 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
&args->size, &args->handle);
}
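As a worked example of the pitch and size arithmetic above, here is a standalone sketch; MAX_LINEAR_STRIDE is a hypothetical stand-in for intel_plane_fb_max_stride(), whose real value depends on platform and format, and -1 stands in for -EINVAL:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_LINEAR_STRIDE  (16 * 1024)	/* hypothetical HW limit, bytes */

static int dumb_pitch_size(uint32_t width, uint32_t height, uint32_t bpp,
			   uint64_t *pitch, uint64_t *size)
{
	uint32_t cpp = DIV_ROUND_UP(bpp, 8);

	if (cpp != 1 && cpp != 2 && cpp != 4)
		return -1;	/* only C8/RGB565/XRGB8888-class formats */

	*pitch = ALIGN_UP((uint64_t)width * cpp, 64);

	/* Too wide to remap as a linear fb? Pad the stride to page size. */
	if (*pitch > MAX_LINEAR_STRIDE)
		*pitch = ALIGN_UP(*pitch, 4096);

	*size = *pitch * height;
	return 0;
}

int main(void)
{
	uint64_t pitch, size;

	if (!dumb_pitch_size(1920, 1080, 32, &pitch, &size))
		printf("pitch=%llu size=%llu\n",
		       (unsigned long long)pitch, (unsigned long long)size);
	return 0;	/* prints pitch=7680 size=8294400 */
}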
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
/**
* Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
@@ -688,13 +233,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
&args->size, &args->handle);
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref;
@@ -725,171 +263,14 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
i915_gem_chipset_flush(dev_priv);
- with_intel_runtime_pm(dev_priv, wakeref) {
- spin_lock_irq(&dev_priv->uncore.lock);
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ struct intel_uncore *uncore = &dev_priv->uncore;
- POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
-
- spin_unlock_irq(&dev_priv->uncore.lock);
- }
-}
-
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_flush_ggtt_writes(dev_priv);
-
- intel_fb_obj_flush(obj,
- fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
-
- for_each_ggtt_vma(vma, obj) {
- if (vma->iomap)
- continue;
-
- i915_vma_unset_ggtt_write(vma);
- }
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
-/*
- * Pins the specified object's pages and synchronizes the object with
- * GPU accesses. Sets needs_clflush to non-zero if the caller should
- * flush the object from the CPU cache.
- */
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- *needs_clflush = 0;
- if (!i915_gem_object_has_struct_page(obj))
- return -ENODEV;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
- !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, false);
- if (ret)
- goto err_unpin;
- else
- goto out;
- }
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* If we're not in the cpu read domain, set ourselves into the gtt
- * read domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will dirty the data
- * anyway again before the next pread happens.
- */
- if (!obj->cache_dirty &&
- !(obj->read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush = CLFLUSH_BEFORE;
-
-out:
- /* return with the pages pinned */
- return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
-}
-
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- *needs_clflush = 0;
- if (!i915_gem_object_has_struct_page(obj))
- return -ENODEV;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
- !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- goto err_unpin;
- else
- goto out;
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irq(&uncore->lock);
}
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* If we're not in the cpu write domain, set ourselves into the
- * gtt write domain and manually flush cachelines (as required).
- * This optimizes for the case when the gpu will use the data
- * right away and we therefore have to clflush anyway.
- */
- if (!obj->cache_dirty) {
- *needs_clflush |= CLFLUSH_AFTER;
-
- /*
- * Same trick applies to invalidate partially written
- * cachelines read before writing.
- */
- if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush |= CLFLUSH_BEFORE;
- }
-
-out:
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->mm.dirty = true;
- /* return with the pages pinned */
- return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
static int
@@ -915,20 +296,21 @@ static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
- char __user *user_data;
- u64 remain;
unsigned int needs_clflush;
unsigned int idx, offset;
+ struct dma_fence *fence;
+ char __user *user_data;
+ u64 remain;
int ret;
- ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_prepare_read(obj, &needs_clflush);
if (ret)
return ret;
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- mutex_unlock(&obj->base.dev->struct_mutex);
- if (ret)
- return ret;
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_finish_access(obj);
+ if (!fence)
+ return -ENOMEM;
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
@@ -947,7 +329,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
offset = 0;
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_unlock_fence(obj, fence);
return ret;
}
@@ -983,8 +365,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
- struct i915_vma *vma;
+ struct dma_fence *fence;
void __user *user_data;
+ struct i915_vma *vma;
u64 remain, offset;
int ret;
@@ -992,7 +375,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
@@ -1013,11 +396,24 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated);
}
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
- mutex_unlock(&i915->drm.struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ goto out_unpin;
+ }
+
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_unlock(obj);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
@@ -1055,8 +451,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
offset += page_length;
}
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_object_unlock_fence(obj, fence);
out_unpin:
+ mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
@@ -1065,7 +462,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1165,8 +562,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
+ struct dma_fence *fence;
struct i915_vma *vma;
u64 remain, offset;
void __user *user_data;
@@ -1184,14 +583,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(rpm);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1214,11 +613,24 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated);
}
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
- mutex_unlock(&i915->drm.struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ goto out_unpin;
+ }
+
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_unlock(obj);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
@@ -1263,8 +675,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_object_unlock_fence(obj, fence);
out_unpin:
+ mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
@@ -1273,7 +686,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1310,22 +723,22 @@ static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- void __user *user_data;
- u64 remain;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
+ struct dma_fence *fence;
+ void __user *user_data;
+ u64 remain;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ ret = i915_gem_object_prepare_write(obj, &needs_clflush);
if (ret)
return ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- mutex_unlock(&i915->drm.struct_mutex);
- if (ret)
- return ret;
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_finish_access(obj);
+ if (!fence)
+ return -ENOMEM;
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
@@ -1354,7 +767,8 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_unlock_fence(obj, fence);
+
return ret;
}
@@ -1443,143 +857,6 @@ err:
return ret;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct list_head *list;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- mutex_lock(&i915->ggtt.vm.mutex);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- spin_lock(&i915->mm.obj_lock);
- list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->mm.link, list);
- spin_unlock(&i915->mm.obj_lock);
-}
-
-/**
- * Called when user space prepares to use an object with the CPU, either
- * through the mmap ioctl's mapping or a GTT mapping.
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- */
-int
-i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_set_domain *args = data;
- struct drm_i915_gem_object *obj;
- u32 read_domains = args->read_domains;
- u32 write_domain = args->write_domain;
- int err;
-
- /* Only handle setting domains to types used by the CPU. */
- if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
- return -EINVAL;
-
- /*
- * Having something in the write domain implies it's in the read
- * domain, and only that read domain. Enforce that in the request.
- */
- if (write_domain && read_domains != write_domain)
- return -EINVAL;
-
- if (!read_domains)
- return 0;
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /*
- * Already in the desired write domain? Nothing for us to do!
- *
- * We apply a little bit of cunning here to catch a broader set of
- * no-ops. If obj->write_domain is set, we must be in the same
- * obj->read_domains, and only that domain. Therefore, if that
- * obj->write_domain matches the request read_domains, we are
- * already in the same read/write domain and can skip the operation,
- * without having to further check the requested write_domain.
- */
- if (READ_ONCE(obj->write_domain) == read_domains) {
- err = 0;
- goto out;
- }
-
- /*
- * Try to flush the object off the GPU without holding the lock.
- * We will repeat the flush holding the lock in the normal manner
- * to catch cases where we are gazumped.
- */
- err = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_PRIORITY |
- (write_domain ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto out;
-
- /*
- * Proxy objects do not control access to the backing storage, ergo
- * they cannot be used as a means to manipulate the cache domain
- * tracking for that backing storage. The proxy object is always
- * considered to be outside of any cache domain.
- */
- if (i915_gem_object_is_proxy(obj)) {
- err = -ENXIO;
- goto out;
- }
-
- /*
- * Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out;
-
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto out_unpin;
-
- if (read_domains & I915_GEM_DOMAIN_WC)
- err = i915_gem_object_set_to_wc_domain(obj, write_domain);
- else if (read_domains & I915_GEM_DOMAIN_GTT)
- err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
- else
- err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
-
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
- mutex_unlock(&dev->struct_mutex);
-
- if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- fb_write_origin(obj, write_domain));
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
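The read/write domain rule enforced above (a write domain implies that single read domain) reduces to a few lines of bookkeeping. A minimal sketch: the bit values follow the i915 uAPI header, but the struct and helper are invented for illustration:

#include <stdio.h>

#define DOM_CPU (1u << 0)
#define DOM_GTT (1u << 6)
#define DOM_WC  (1u << 7)

struct bo_sketch { unsigned int read_domains, write_domain; };

static void set_domain(struct bo_sketch *bo, unsigned int dom, int write)
{
	bo->read_domains |= dom;	/* reading just adds a bit */
	if (write) {
		/* a write domain implies that one read domain, only */
		bo->read_domains = dom;
		bo->write_domain = dom;
	}
}

int main(void)
{
	struct bo_sketch bo = { .read_domains = DOM_CPU, .write_domain = 0 };

	set_domain(&bo, DOM_WC, 1);
	printf("read=%#x write=%#x\n", bo.read_domains, bo.write_domain);
	return 0;	/* prints read=0x80 write=0x80 */
}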
-
/**
* Called when user space has done writes to this buffer
* @dev: drm device
@@ -1609,421 +886,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static inline bool
-__vma_matches(struct vm_area_struct *vma, struct file *filp,
- unsigned long addr, unsigned long size)
-{
- if (vma->vm_file != filp)
- return false;
-
- return vma->vm_start == addr &&
- (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
-}
-
-/**
- * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
- * it is mapped to.
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- *
- * IMPORTANT:
- *
- * DRM driver writers who look at this function as an example of how to do GEM
- * mmap support, please don't implement mmap support like here. The modern way
- * to implement DRM mmap support is with an mmap offset ioctl (like
- * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
- * That way debug tooling like valgrind will understand what's going on, hiding
- * the mmap call in a driver private ioctl will break that. The i915 driver only
- * does cpu mmaps this way because we didn't know better.
- */
-int
-i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_mmap *args = data;
- struct drm_i915_gem_object *obj;
- unsigned long addr;
-
- if (args->flags & ~(I915_MMAP_WC))
- return -EINVAL;
-
- if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
- return -ENODEV;
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /* prime objects have no backing filp from which to
- * GEM mmap pages.
- */
- if (!obj->base.filp) {
- addr = -ENXIO;
- goto err;
- }
-
- if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
- addr = -EINVAL;
- goto err;
- }
-
- addr = vm_mmap(obj->base.filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- if (IS_ERR_VALUE(addr))
- goto err;
-
- if (args->flags & I915_MMAP_WC) {
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
- if (down_write_killable(&mm->mmap_sem)) {
- addr = -EINTR;
- goto err;
- }
- vma = find_vma(mm, addr);
- if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- else
- addr = -ENOMEM;
- up_write(&mm->mmap_sem);
- if (IS_ERR_VALUE(addr))
- goto err;
-
- /* This may race, but that's ok, it only gets set */
- WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
- }
- i915_gem_object_put(obj);
-
- args->addr_ptr = (u64)addr;
- return 0;
-
-err:
- i915_gem_object_put(obj);
- return addr;
-}
-
-static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
-}
-
-/**
- * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
- *
- * A history of the GTT mmap interface:
- *
- * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
- * aligned and suitable for fencing, and still fit into the available
- * mappable space left by the pinned display objects. A classic problem
- * we called the page-fault-of-doom where we would ping-pong between
- * two objects that could not fit inside the GTT and so the memcpy
- * would page one object in at the expense of the other between every
- * single byte.
- *
- * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
- * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
- * object is too large for the available space (or simply too large
- * for the mappable aperture!), a view is created instead and faulted
- * into userspace. (This view is aligned and sized appropriately for
- * fenced access.)
- *
- * 2 - Recognise WC as a separate cache domain so that we can flush the
- * delayed writes via GTT before performing direct access via WC.
- *
- * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
- * pagefault; swapin remains transparent.
- *
- * Restrictions:
- *
- * * snoopable objects cannot be accessed via the GTT. It can cause machine
- * hangs on some architectures, corruption on others. An attempt to service
- * a GTT page fault from a snoopable object will generate a SIGBUS.
- *
- * * the object must be able to fit into RAM (physical memory, though not
- * limited to the mappable aperture).
- *
- *
- * Caveats:
- *
- * * a new GTT page fault will synchronize rendering from the GPU and flush
- * all data to system memory. Subsequent access will not be synchronized.
- *
- * * all mappings are revoked on runtime device suspend.
- *
- * * there are only 8, 16 or 32 fence registers to share between all users
- * (older machines require a fence register for display and blitter access
- * as well). Contention of the fence registers will cause the previous users
- * to be unmapped and any new access will generate new page faults.
- *
- * * running out of memory while servicing a fault may generate a SIGBUS,
- * rather than the expected SIGSEGV.
- */
-int i915_gem_mmap_gtt_version(void)
-{
- return 3;
-}
-
-static inline struct i915_ggtt_view
-compute_partial_view(const struct drm_i915_gem_object *obj,
- pgoff_t page_offset,
- unsigned int chunk)
-{
- struct i915_ggtt_view view;
-
- if (i915_gem_object_is_tiled(obj))
- chunk = roundup(chunk, tile_row_pages(obj));
-
- view.type = I915_GGTT_VIEW_PARTIAL;
- view.partial.offset = rounddown(page_offset, chunk);
- view.partial.size =
- min_t(unsigned int, chunk,
- (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
-
- /* If the partial covers the entire object, just create a normal VMA. */
- if (chunk >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
-
- return view;
-}
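A worked example of the partial-view maths above, untiled case only (the tile-row rounding and the full-object fallback to a normal VMA are omitted); the numbers are arbitrary:

#include <stdio.h>

#define MIN_CHUNK_PAGES (1u << 8)	/* 1 MiB of 4 KiB pages */

int main(void)
{
	unsigned int obj_pages = 16384;	/* a 64 MiB object */
	unsigned int fault_page = 5000;	/* page touched by the fault */
	unsigned int chunk = MIN_CHUNK_PAGES;

	unsigned int offset = fault_page / chunk * chunk; /* rounddown() */
	unsigned int size = chunk;

	if (size > obj_pages - offset)
		size = obj_pages - offset;	/* min_t() clamp */

	printf("partial view: pages [%u, %u)\n", offset, offset + size);
	return 0;	/* prints pages [4864, 5120) */
}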
-
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
- * from userspace. The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room. So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
-{
-#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
- struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool write = area->vm_flags & VM_WRITE;
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- pgoff_t page_offset;
- int srcu;
- int ret;
-
- /* Sanity check that we allow writing into this object */
- if (i915_gem_object_is_readonly(obj) && write)
- return VM_FAULT_SIGBUS;
-
- /* We don't use vmf->pgoff since that has the fake offset */
- page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
-
- trace_i915_gem_object_fault(obj, page_offset, true, write);
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- wakeref = intel_runtime_pm_get(dev_priv);
-
- srcu = i915_reset_trylock(dev_priv);
- if (srcu < 0) {
- ret = srcu;
- goto err_rpm;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
- /* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
- if (IS_ERR(vma)) {
- /* Use a partial view if it is bigger than available space */
- struct i915_ggtt_view view =
- compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
- unsigned int flags;
-
- flags = PIN_MAPPABLE;
- if (view.type == I915_GGTT_VIEW_NORMAL)
- flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
-
- /*
- * Userspace is now writing through an untracked VMA, abandon
- * all hope that the hardware is able to track future writes.
- */
- obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
-
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma) && !view.type) {
- flags = PIN_MAPPABLE;
- view.type = I915_GGTT_VIEW_PARTIAL;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- }
- }
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unlock;
- }
-
- ret = i915_vma_pin_fence(vma);
- if (ret)
- goto err_unpin;
-
- /* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
- if (ret)
- goto err_fence;
-
- /* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(dev_priv);
- if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
- list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
- GEM_BUG_ON(!obj->userfault_count);
-
- i915_vma_set_ggtt_write(vma);
-
-err_fence:
- i915_vma_unpin_fence(vma);
-err_unpin:
- __i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
-err_reset:
- i915_reset_unlock(dev_priv, srcu);
-err_rpm:
- intel_runtime_pm_put(dev_priv, wakeref);
- i915_gem_object_unpin_pages(obj);
-err:
- switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!i915_terminally_wedged(dev_priv))
- return VM_FAULT_SIGBUS;
- /* else: fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- return VM_FAULT_SIGBUS;
- }
-}
-
-static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(!obj->userfault_count);
-
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
- for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
-}
-
-/**
- * i915_gem_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
- * It is vital that we remove the page mapping if we have mapped a tiled
- * object through the GTT and then lose the fence register due to
- * resource pressure. Similarly if the object has been moved out of the
- * aperture, then pages mapped into userspace must be revoked. Removing the
- * mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
- */
-void
-i915_gem_release_mmap(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- /* Serialisation between user GTT access and our code depends upon
- * revoking the CPU's PTE whilst the mutex is held. The next user
- * pagefault then has to wait until we release the mutex.
- *
- * Note that RPM complicates matters somewhat by adding an additional
- * requirement that operations to the GGTT be made holding the RPM
- * wakeref.
- */
- lockdep_assert_held(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- if (!obj->userfault_count)
- goto out;
-
- __i915_gem_object_release_mmap(obj);
-
- /* Ensure that the CPU's PTEs are revoked and there are no outstanding
- * memory transactions from userspace before we return. The TLB
- * flushing implied by changing the PTEs above *should* be
- * sufficient; an extra barrier here just provides us with a bit
- * of paranoid documentation about our requirement to serialise
- * memory writes before touching registers / GSM.
- */
- wmb();
-
-out:
- intel_runtime_pm_put(i915, wakeref);
-}
-
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
int i;
@@ -2036,17 +899,19 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link)
+ &i915->ggtt.userfault_list, userfault_link)
__i915_gem_object_release_mmap(obj);
- /* The fence will be lost when the device powers down. If any were
+ /*
+ * The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- /* Ideally we want to assert that the fence register is not
+ /*
+ * Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that both violates
* our tracking of activity and associated locking/barriers,
@@ -2065,1056 +930,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
}
}
-static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int err;
-
- err = drm_gem_create_mmap_offset(&obj->base);
- if (likely(!err))
- return 0;
-
- /* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
-
- i915_gem_drain_freed_objects(dev_priv);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
-
- } while (flush_delayed_work(&dev_priv->gt.retire_work));
-
- return err;
-}
-
-static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
-{
- drm_gem_free_mmap_offset(&obj->base);
-}
-
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
-{
- struct drm_i915_gem_object *obj;
- int ret;
-
- obj = i915_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- ret = i915_gem_object_create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
-
- i915_gem_object_put(obj);
- return ret;
-}
-
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
-int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_mmap_gtt *args = data;
-
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
-}
-
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_free_mmap_offset(obj);
-
- if (obj->base.filp == NULL)
- return;
-
- /* Our goal here is to return as much of the memory as
- * possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->mm.madv = __I915_MADV_PURGED;
- obj->mm.pages = ERR_PTR(-EFAULT);
-}
-
-/* Try to discard unwanted pages */
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping;
-
- lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- switch (obj->mm.madv) {
- case I915_MADV_DONTNEED:
- i915_gem_object_truncate(obj);
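- /* fall through */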
- case __I915_MADV_PURGED:
- return;
- }
-
- if (obj->base.filp == NULL)
- return;
-
- mapping = obj->base.filp->f_mapping;
- invalidate_mapping_pages(mapping, 0, (loff_t)-1);
-}
-
-/*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
- */
-static void check_release_pagevec(struct pagevec *pvec)
-{
- check_move_unevictable_pages(pvec);
- __pagevec_release(pvec);
- cond_resched();
-}
-
-static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- struct sgt_iter sgt_iter;
- struct pagevec pvec;
- struct page *page;
-
- __i915_gem_object_release_shmem(obj, pages, true);
- i915_gem_gtt_finish_pages(obj, pages);
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj, pages);
-
- mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
- set_page_dirty(page);
-
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
-
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- obj->mm.dirty = false;
-
- sg_free_table(pages);
- kfree(pages);
-}
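The pagevec usage above follows a common batch-and-flush pattern: accumulate items into a small fixed array, flush when it fills, then flush the remainder. A minimal sketch, assuming a hypothetical batch size of 15 entries:

#include <stdio.h>

#define BATCH 15	/* hypothetical pagevec capacity */

static void release_batch(const int *pages, int n)
{
	(void)pages;	/* a real implementation would free these */
	printf("releasing %d pages\n", n);
}

int main(void)
{
	int batch[BATCH], n = 0, i;

	for (i = 0; i < 40; i++) {
		batch[n++] = i;
		if (n == BATCH) {	/* like pagevec_add() hitting full */
			release_batch(batch, n);
			n = 0;
		}
	}
	if (n)				/* like the final pagevec_count() */
		release_batch(batch, n);
	return 0;			/* prints 15, 15, 10 */
}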
-
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
-
- rcu_read_lock();
- radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
- radix_tree_delete(&obj->mm.get_page.radix, iter.index);
- rcu_read_unlock();
-}
-
-static struct sg_table *
-__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *pages;
-
- pages = fetch_and_zero(&obj->mm.pages);
- if (IS_ERR_OR_NULL(pages))
- return pages;
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
-
- if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- obj->mm.mapping = NULL;
- }
-
- __i915_gem_object_reset_page_iter(obj);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
- return pages;
-}
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
-{
- struct sg_table *pages;
- int ret;
-
- if (i915_gem_object_has_pinned_pages(obj))
- return -EBUSY;
-
- GEM_BUG_ON(obj->bind_count);
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
- ret = -EBUSY;
- goto unlock;
- }
-
- /*
- * ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early.
- */
- pages = __i915_gem_object_unset_pages(obj);
-
- /*
- * XXX Temporary hijinx to avoid updating all backends to handle
- * NULL pages. In the future, when we have more asynchronous
- * get_pages backends we should be better able to handle the
- * cancellation of the async task in a more uniform manner.
- */
- if (!pages && !i915_gem_object_needs_async_cancel(obj))
- pages = ERR_PTR(-EINVAL);
-
- if (!IS_ERR(pages))
- obj->ops->put_pages(obj, pages);
-
- ret = 0;
-unlock:
- mutex_unlock(&obj->mm.lock);
-
- return ret;
-}
-
-bool i915_sg_trim(struct sg_table *orig_st)
-{
- struct sg_table new_st;
- struct scatterlist *sg, *new_sg;
- unsigned int i;
-
- if (orig_st->nents == orig_st->orig_nents)
- return false;
-
- if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
- return false;
-
- new_sg = new_st.sgl;
- for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
- sg_set_page(new_sg, sg_page(sg), sg->length, 0);
- sg_dma_address(new_sg) = sg_dma_address(sg);
- sg_dma_len(new_sg) = sg_dma_len(sg);
-
- new_sg = sg_next(new_sg);
- }
- GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
-
- sg_free_table(orig_st);
-
- *orig_st = new_st;
- return true;
-}
-
-static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
- unsigned long i;
- struct address_space *mapping;
- struct sg_table *st;
- struct scatterlist *sg;
- struct sgt_iter sgt_iter;
- struct page *page;
- unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment = i915_sg_segment_size();
- unsigned int sg_page_sizes;
- struct pagevec pvec;
- gfp_t noreclaim;
- int ret;
-
- /*
- * Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
-
- /*
- * If there's no chance of allocating enough pages for the whole
- * object, bail early.
- */
- if (page_count > totalram_pages())
- return -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return -ENOMEM;
-
-rebuild_st:
- if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- kfree(st);
- return -ENOMEM;
- }
-
- /*
- * Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- *
- * Fail silently without starting the shrinker
- */
- mapping = obj->base.filp->f_mapping;
- mapping_set_unevictable(mapping);
- noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
- noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
-
- sg = st->sgl;
- st->nents = 0;
- sg_page_sizes = 0;
- for (i = 0; i < page_count; i++) {
- const unsigned int shrink[] = {
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
- 0,
- }, *s = shrink;
- gfp_t gfp = noreclaim;
-
- do {
- cond_resched();
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (!IS_ERR(page))
- break;
-
- if (!*s) {
- ret = PTR_ERR(page);
- goto err_sg;
- }
-
- i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
-
- /*
- * We've tried hard to allocate the memory by reaping
- * our own buffer, now let the real VM do its job and
- * go down in flames if truly OOM.
- *
- * However, since graphics tend to be disposable,
- * defer the oom here by reporting the ENOMEM back
- * to userspace.
- */
- if (!*s) {
- /* reclaim and warn, but no oom */
- gfp = mapping_gfp_mask(mapping);
-
- /*
- * Our bo are always dirty and so we require
- * kswapd to reclaim our pages (direct reclaim
- * does not effectively begin pageout of our
- * buffers on its own). However, direct reclaim
- * only waits for kswapd when under allocation
- * congestion. So as a result __GFP_RECLAIM is
- * unreliable and fails to actually reclaim our
- * dirty pages -- unless you try over and over
- * again with !__GFP_NORETRY. However, we still
- * want to fail this allocation rather than
- * trigger the out-of-memory killer and for
- * this we want __GFP_RETRY_MAYFAIL.
- */
- gfp |= __GFP_RETRY_MAYFAIL;
- }
- } while (1);
-
- if (!i ||
- sg->length >= max_segment ||
- page_to_pfn(page) != last_pfn + 1) {
- if (i) {
- sg_page_sizes |= sg->length;
- sg = sg_next(sg);
- }
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- } else {
- sg->length += PAGE_SIZE;
- }
- last_pfn = page_to_pfn(page);
-
- /* Check that the i965g/gm workaround works. */
- WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
- }
- if (sg) { /* loop terminated early; short sg table */
- sg_page_sizes |= sg->length;
- sg_mark_end(sg);
- }
-
- /* Trim unused sg entries to avoid wasting memory. */
- i915_sg_trim(st);
-
- ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret) {
- /*
- * DMA remapping failed? One possible cause is that
- * it could not reserve enough large entries, asking
- * for PAGE_SIZE chunks instead may be helpful.
- */
- if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
-
- max_segment = PAGE_SIZE;
- goto rebuild_st;
- } else {
- dev_warn(&dev_priv->drm.pdev->dev,
- "Failed to DMA remap %lu pages\n",
- page_count);
- goto err_pages;
- }
- }
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj, st);
-
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
-
- return 0;
-
-err_sg:
- sg_mark_end(sg);
-err_pages:
- mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- sg_free_table(st);
- kfree(st);
-
- /*
- * shmemfs first checks if there is enough memory to allocate the page
- * and reports ENOSPC if there is not, along with the usual
- * ENOMEM for a genuine allocation failure.
- *
- * We use ENOSPC in our driver to mean that we have run out of aperture
- * space and so want to translate the error from shmemfs back to our
- * usual understanding of ENOMEM.
- */
- if (ret == -ENOSPC)
- ret = -ENOMEM;
-
- return ret;
-}
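The sg-building loop above coalesces physically contiguous pages into one scatterlist entry, subject to max_segment. Stripped of kernel types, the run-length logic looks roughly like this (all names invented):

#include <stdio.h>

struct seg { unsigned long start_pfn; unsigned long npages; };

/* Collapse runs of contiguous pfns into (start, length) segments, the
 * way contiguous pages collapse into one scatterlist entry, capped at
 * max_pages per segment (the max_segment analogue).
 */
static unsigned int coalesce(const unsigned long *pfn, unsigned int n,
			     struct seg *out, unsigned long max_pages)
{
	unsigned int nsegs = 0, i;

	for (i = 0; i < n; i++) {
		struct seg *cur = nsegs ? &out[nsegs - 1] : NULL;

		if (cur && cur->npages < max_pages &&
		    pfn[i] == cur->start_pfn + cur->npages) {
			cur->npages++;		/* extend the run */
		} else {
			out[nsegs].start_pfn = pfn[i];
			out[nsegs].npages = 1;
			nsegs++;
		}
	}
	return nsegs;
}

int main(void)
{
	unsigned long pfn[] = { 100, 101, 102, 200, 201, 300 };
	struct seg segs[6];
	unsigned int i, n = coalesce(pfn, 6, segs, 512);

	for (i = 0; i < n; i++)	/* prints three segments */
		printf("pfn %lu +%lu\n", segs[i].start_pfn, segs[i].npages);
	return 0;
}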
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- unsigned int sg_page_sizes)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- int i;
-
- lockdep_assert_held(&obj->mm.lock);
-
- /* Make the pages coherent with the GPU (flushing any swapin). */
- if (obj->cache_dirty) {
- obj->write_domain = 0;
- if (i915_gem_object_has_struct_page(obj))
- drm_clflush_sg(pages);
- obj->cache_dirty = false;
- }
-
- obj->mm.get_page.sg_pos = pages->sgl;
- obj->mm.get_page.sg_idx = 0;
-
- obj->mm.pages = pages;
-
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
- GEM_BUG_ON(!sg_page_sizes);
- obj->mm.page_sizes.phys = sg_page_sizes;
-
- /*
- * Calculate the supported page-sizes which fit into the given
- * sg_page_sizes. This will give us the page-sizes which we may be able
- * to use opportunistically when later inserting into the GTT. For
- * example if phys=2G, then in theory we should be able to use 1G, 2M,
- * 64K or 4K pages, although in practice this will depend on a number of
- * other factors.
- */
- obj->mm.page_sizes.sg = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
- if (obj->mm.page_sizes.phys & ~0u << i)
- obj->mm.page_sizes.sg |= BIT(i);
- }
- GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
-
- spin_lock(&i915->mm.obj_lock);
- list_add(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
-}
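The page-size derivation described in the comment above reduces to a small bit trick: a page size of BIT(i) is usable whenever phys has any bit set at or above i, i.e. phys & ~0ul << i is non-zero. A sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long supported = (1ul << 12) | (1ul << 16) | (1ul << 21);
	unsigned long phys = 2ul << 20;	/* 2M of backing store */
	unsigned long sg = 0;
	unsigned int i;

	for (i = 0; i < 22; i++) {	/* up to ilog2(2M) inclusive */
		if (!(supported & (1ul << i)))
			continue;
		if (phys & (~0ul << i))
			sg |= 1ul << i;
	}

	printf("sg mask: %#lx\n", sg);
	return 0;	/* prints sg mask: 0x211000 (4K | 64K | 2M) */
}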
-
-static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- int err;
-
- if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
- return -EFAULT;
- }
-
- err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
- return err;
-}
-
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_pin_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_unpin_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- int err;
-
- err = mutex_lock_interruptible(&obj->mm.lock);
- if (err)
- return err;
-
- if (unlikely(!i915_gem_object_has_pages(obj))) {
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
-
-unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
-}
-
-/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
- pgprot_t pgprot;
- void *addr;
-
- /* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
-
- if (n_pages > ARRAY_SIZE(stack_pages)) {
- /* Too big for stack -- allocate temporary array instead */
- pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return NULL;
- }
-
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
-
- switch (type) {
- default:
- MISSING_CASE(type);
- /* fallthrough to use PAGE_KERNEL anyway */
- case I915_MAP_WB:
- pgprot = PAGE_KERNEL;
- break;
- case I915_MAP_WC:
- pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
- break;
- }
- addr = vmap(pages, n_pages, 0, pgprot);
-
- if (pages != stack_pages)
- kvfree(pages);
-
- return addr;
-}
-
-/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- enum i915_map_type has_type;
- bool pinned;
- void *ptr;
- int ret;
-
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
- return ERR_PTR(-ENXIO);
-
- ret = mutex_lock_interruptible(&obj->mm.lock);
- if (ret)
- return ERR_PTR(ret);
-
- pinned = !(type & I915_MAP_OVERRIDE);
- type &= ~I915_MAP_OVERRIDE;
-
- if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!i915_gem_object_has_pages(obj))) {
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
- ret = ____i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
- pinned = false;
- }
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
- if (ptr && has_type != type) {
- if (pinned) {
- ret = -EBUSY;
- goto err_unpin;
- }
-
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- ptr = obj->mm.mapping = NULL;
- }
-
- if (!ptr) {
- ptr = i915_gem_object_map(obj, type);
- if (!ptr) {
- ret = -ENOMEM;
- goto err_unpin;
- }
-
- obj->mm.mapping = page_pack_bits(ptr, type);
- }
-
-out_unlock:
- mutex_unlock(&obj->mm.lock);
- return ptr;
-
-err_unpin:
- atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(ret);
- goto out_unlock;
-}
-
-void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
- unsigned long offset,
- unsigned long size)
-{
- enum i915_map_type has_type;
- void *ptr;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
- offset, size, obj->base.size));
-
- obj->mm.dirty = true;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
- return;
-
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
- if (has_type == I915_MAP_WC)
- return;
-
- drm_clflush_virt_range(ptr + offset, size);
- if (size == obj->base.size) {
- obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
- obj->cache_dirty = false;
- }
-}
-
-static int
-i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *arg)
-{
- struct address_space *mapping = obj->base.filp->f_mapping;
- char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain, offset;
- unsigned int pg;
-
- /* Caller already validated user args */
- GEM_BUG_ON(!access_ok(user_data, arg->size));
-
- /*
- * Before we instantiate/pin the backing store for our use, we
- * can prepopulate the shmemfs filp efficiently using a write into
- * the pagecache. We avoid the penalty of instantiating all the
- * pages, important if the user is just writing to a few and never
- * uses the object on the GPU, and using a direct write into shmemfs
- * allows it to avoid the cost of retrieving a page (either swapin
- * or clearing-before-use) before it is overwritten.
- */
- if (i915_gem_object_has_pages(obj))
- return -ENODEV;
-
- if (obj->mm.madv != I915_MADV_WILLNEED)
- return -EFAULT;
-
- /*
- * Before the pages are instantiated the object is treated as being
- * in the CPU domain. The pages will be clflushed as required before
- * use, and we can freely write into the pages directly. If userspace
- * races pwrite with any other operation, corruption will ensue;
- * that is userspace's prerogative!
- */
-
- remain = arg->size;
- offset = arg->offset;
- pg = offset_in_page(offset);
-
- do {
- unsigned int len, unwritten;
- struct page *page;
- void *data, *vaddr;
- int err;
- char c;
-
- len = PAGE_SIZE - pg;
- if (len > remain)
- len = remain;
-
- /* Prefault the user page to reduce potential recursion */
- err = __get_user(c, user_data);
- if (err)
- return err;
-
- err = __get_user(c, user_data + len - 1);
- if (err)
- return err;
-
- err = pagecache_write_begin(obj->base.filp, mapping,
- offset, len, 0,
- &page, &data);
- if (err < 0)
- return err;
-
- vaddr = kmap_atomic(page);
- unwritten = __copy_from_user_inatomic(vaddr + pg,
- user_data,
- len);
- kunmap_atomic(vaddr);
-
- err = pagecache_write_end(obj->base.filp, mapping,
- offset, len, len - unwritten,
- page, data);
- if (err < 0)
- return err;
-
- /* We don't handle -EFAULT, leave it to the caller to check */
- if (unwritten)
- return -ENODEV;
-
- remain -= len;
- user_data += len;
- offset += len;
- pg = 0;
- } while (remain);
-
- return 0;
-}
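The copy loop above splits the user range into page-bounded chunks; here is the chunking arithmetic in isolation, with arbitrary inputs:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned long long offset = 4000, remain = 10000;
	unsigned int pg = offset % PAGE_SIZE;	/* offset_in_page() */

	while (remain) {
		unsigned int len = PAGE_SIZE - pg;

		if (len > remain)
			len = remain;

		/* prints segments of 96, 4096, 4096 and 1712 bytes */
		printf("copy %u bytes at offset %llu\n", len, offset);

		remain -= len;
		offset += len;
		pg = 0;		/* later chunks start page-aligned */
	}
	return 0;
}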
-
-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), gt.retire_work.work);
- struct drm_device *dev = &dev_priv->drm;
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&dev->struct_mutex)) {
- i915_retire_requests(dev_priv);
- mutex_unlock(&dev->struct_mutex);
- }
-
- /*
- * Keep the retire handler running until we are finally idle.
- * We do not need to do this test under locking as in the worst-case
- * we queue the retire worker once too often.
- */
- if (READ_ONCE(dev_priv->gt.awake))
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
- unsigned long mask)
-{
- bool result = true;
-
- /*
- * Even if we fail to switch, give whatever is running a small chance
- * to save itself before we report the failure. Yes, this may be a
- * false positive due to e.g. ENOMEM, caveat emptor!
- */
- if (i915_gem_switch_to_kernel_context(i915, mask))
- result = false;
-
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT))
- result = false;
-
- if (!result) {
- if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /* Forcibly cancel outstanding work and leave the gpu quiet. */
- i915_gem_set_wedged(i915);
- }
-
- i915_retire_requests(i915); /* ensure we flush after wedging */
- return result;
-}
-
-static bool load_power_context(struct drm_i915_private *i915)
-{
- /* Force loading the kernel context on all engines */
- if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
- return false;
-
- /*
- * Immediately park the GPU so that we enable powersaving and
- * treat it as idle. The next time we issue a request, we will
- * unpark and start using the engine->pinned_default_state, otherwise
- * it is in limbo and an early reset may fail.
- */
- __i915_gem_park(i915);
-
- return true;
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gt.idle_work.work);
- bool rearm_hangcheck;
-
- if (!READ_ONCE(i915->gt.awake))
- return;
-
- if (READ_ONCE(i915->gt.active_requests))
- return;
-
- rearm_hangcheck =
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-
- if (!mutex_trylock(&i915->drm.struct_mutex)) {
- /* Currently busy, come back later */
- mod_delayed_work(i915->wq,
- &i915->gt.idle_work,
- msecs_to_jiffies(50));
- goto out_rearm;
- }
-
- /*
- * Flush out the last user context, leaving only the pinned
- * kernel context resident. Should anything unfortunate happen
- * while we are idle (such as the GPU being power cycled), no users
- * will be harmed.
- */
- if (!work_pending(&i915->gt.idle_work.work) &&
- !i915->gt.active_requests) {
- ++i915->gt.active_requests; /* don't requeue idle */
-
- switch_to_kernel_context_sync(i915, i915->gt.active_engines);
-
- if (!--i915->gt.active_requests) {
- __i915_gem_park(i915);
- rearm_hangcheck = false;
- }
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-
-out_rearm:
- if (rearm_hangcheck) {
- GEM_BUG_ON(!i915->gt.awake);
- i915_queue_hangcheck(i915);
- }
-}
-
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(gem->dev);
- struct drm_i915_gem_object *obj = to_intel_bo(gem);
- struct drm_i915_file_private *fpriv = file->driver_priv;
- struct i915_lut_handle *lut, *ln;
-
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
- struct i915_gem_context *ctx = lut->ctx;
- struct i915_vma *vma;
-
- GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
- if (ctx->file_priv != fpriv)
- continue;
-
- vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
- GEM_BUG_ON(vma->obj != obj);
-
- /* We allow the process to have multiple handles to the same
- * vma, in the same fd namespace, by virtue of flink/open.
- */
- GEM_BUG_ON(!vma->open_count);
- if (!--vma->open_count && !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
-
- list_del(&lut->obj_link);
- list_del(&lut->ctx_link);
-
- i915_lut_handle_free(lut);
- __i915_gem_object_release_unless_active(obj);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static unsigned long to_wait_timeout(s64 timeout_ns)
-{
- if (timeout_ns < 0)
- return MAX_SCHEDULE_TIMEOUT;
-
- if (timeout_ns == 0)
- return 0;
-
- return nsecs_to_jiffies_timeout(timeout_ns);
-}
-
-/**
- * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- *
- * Returns 0 if successful, else an error is returned with the remaining time in
- * the timeout parameter.
- * -ETIME: object is still busy after timeout
- * -ERESTARTSYS: signal interrupted the wait
- * -ENOENT: object doesn't exist
- * Also possible, but rare:
- * -EAGAIN: incomplete, restart syscall
- * -ENOMEM: damn
- * -ENODEV: Internal IRQ fail
- * -E?: The add request failed
- *
- * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
- * non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter * race condition exists in the busy
- * ioctl
- */
-int
-i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
-{
- struct drm_i915_gem_wait *args = data;
- struct drm_i915_gem_object *obj;
- ktime_t start;
- long ret;
-
- if (args->flags != 0)
- return -EINVAL;
-
- obj = i915_gem_object_lookup(file, args->bo_handle);
- if (!obj)
- return -ENOENT;
-
- start = ktime_get();
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_PRIORITY |
- I915_WAIT_ALL,
- to_wait_timeout(args->timeout_ns));
-
- if (args->timeout_ns > 0) {
- args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
- if (args->timeout_ns < 0)
- args->timeout_ns = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regression from the timespec->ktime conversion.
- */
- if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
- args->timeout_ns = 0;
-
- /* Asked to wait beyond the jiffie/scheduler precision? */
- if (ret == -ETIME && args->timeout_ns)
- ret = -EAGAIN;
- }
-
- i915_gem_object_put(obj);
- return ret;
-}
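/*
 * Editorial sketch (not part of the diff): the remaining-timeout bookkeeping
 * in i915_gem_wait_ioctl() above, reduced to plain C. The tick rate and the
 * helper name are assumptions for illustration only.
 */
#include <stdint.h>

#define SKETCH_HZ 100 /* assumed tick rate */

static int64_t remaining_timeout_ns(int64_t timeout_ns, int64_t elapsed_ns)
{
	timeout_ns -= elapsed_ns;
	if (timeout_ns < 0)
		timeout_ns = 0;
	/* Sub-jiffy remainders cannot be waited upon; report them as expired. */
	if (timeout_ns < 1000000000LL / SKETCH_HZ)
		timeout_ns = 0;
	return timeout_ns;
}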
-
static int wait_for_engines(struct drm_i915_private *i915)
{
if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3135,9 +950,6 @@ wait_for_timelines(struct drm_i915_private *i915,
struct i915_gt_timelines *gt = &i915->gt.timelines;
struct i915_timeline *tl;
- if (!READ_ONCE(i915->gt.active_requests))
- return timeout;
-
mutex_lock(&gt->mutex);
list_for_each_entry(tl, &gt->active_list, link) {
struct i915_request *rq;
@@ -3177,9 +989,10 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
+ yesno(i915->gt.awake));
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
@@ -3204,565 +1017,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
return 0;
}
-static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
-{
- /*
- * We manually flush the CPU domain so that we can override and
- * force the flush for the display, and perform it asynchronously.
- */
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
- if (obj->cache_dirty)
- i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->write_domain = 0;
-}
-
-void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
-{
- if (!READ_ONCE(obj->pin_global))
- return;
-
- mutex_lock(&obj->base.dev->struct_mutex);
- __i915_gem_object_flush_for_display(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
-/**
- * Moves a single object to the WC read, and possibly write domain.
- * @obj: object to act on
- * @write: ask for write access or read only
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (obj->write_domain == I915_GEM_DOMAIN_WC)
- return 0;
-
- /* Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
-
- /* Serialise direct access to this object with the barriers for
- * coherent writes from the GPU, by effectively invalidating the
- * WC domain upon first access.
- */
- if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
- mb();
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_WC;
- if (write) {
- obj->read_domains = I915_GEM_DOMAIN_WC;
- obj->write_domain = I915_GEM_DOMAIN_WC;
- obj->mm.dirty = true;
- }
-
- i915_gem_object_unpin_pages(obj);
- return 0;
-}
-
-/**
- * Moves a single object to the GTT read, and possibly write domain.
- * @obj: object to act on
- * @write: ask for write access or read only
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (obj->write_domain == I915_GEM_DOMAIN_GTT)
- return 0;
-
- /* Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
-
- /* Serialise direct access to this object with the barriers for
- * coherent writes from the GPU, by effectively invalidating the
- * GTT domain upon first access.
- */
- if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
- mb();
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj->mm.dirty = true;
- }
-
- i915_gem_object_unpin_pages(obj);
- return 0;
-}
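/*
 * Editorial sketch: the domain-tracking update shared by the WC, GTT and CPU
 * paths above. The domain values are illustrative, mirroring the semantics of
 * the I915_GEM_DOMAIN_* bits rather than their exact definitions.
 */
#define SKETCH_DOMAIN_CPU 0x01u
#define SKETCH_DOMAIN_GTT 0x40u

struct sketch_domains { unsigned int read, write; };

static void sketch_set_domain(struct sketch_domains *d,
			      unsigned int domain, int write)
{
	d->read |= domain;          /* reading is always additive */
	if (write) {
		d->read = domain;   /* a writer invalidates all other readers */
		d->write = domain;  /* and becomes the sole write domain */
	}
}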
-
-/**
- * Changes the cache-level of an object across all VMA.
- * @obj: object to act on
- * @cache_level: new cache level to set for the object
- *
- * After this function returns, the object will be in the new cache-level
- * across all GTT and the contents of the backing storage will be coherent,
- * with respect to the new cache-level. In order to keep the backing storage
- * coherent for all users, we only allow a single cache level to be set
- * globally on the object and prevent it from being changed whilst the
- * hardware is reading from the object. That is, if the object is currently
- * on the scanout it will be set to uncached (or equivalent display
- * cache coherency) and all non-MOCS GPU access will also be uncached so
- * that all direct access to the scanout remains coherent.
- */
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct i915_vma *vma;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- if (obj->cache_level == cache_level)
- return 0;
-
- /* Inspect the list of currently bound VMA and unbind any that would
- * be invalid given the new cache-level. This is principally to
- * catch the issue of the CS prefetch crossing page boundaries and
- * reading an invalid PTE on older architectures.
- */
-restart:
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_pinned(vma)) {
-   DRM_DEBUG("cannot change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (!i915_vma_is_closed(vma) &&
- i915_gem_valid_gtt_space(vma, cache_level))
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
-
- /* As unbinding may affect other elements in the
- * obj->vma_list (due to side-effects from retiring
- * an active vma), play safe and restart the iterator.
- */
- goto restart;
- }
-
- /* We can reuse the existing drm_mm nodes but need to change the
- * cache-level on the PTE. We could simply unbind them all and
- * rebind with the correct cache-level on next use. However since
- * we already have a valid slot, dma mapping, pages etc, we may as well
- * rewrite the PTE in the belief that doing so tramples upon less
- * state and so involves less work.
- */
- if (obj->bind_count) {
- /* Before we change the PTE, the GPU must not be accessing it.
- * If we wait upon the object, we know that all the bound
- * VMA are no longer active.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (!HAS_LLC(to_i915(obj->base.dev)) &&
- cache_level != I915_CACHE_NONE) {
- /* Access to snoopable pages through the GTT is
- * incoherent and on some machines causes a hard
- * lockup. Relinquish the CPU mmapping to force
- * userspace to refault in the pages and we can
- * then double check if the GTT mapping is still
- * valid for that pointer access.
- */
- i915_gem_release_mmap(obj);
-
- /* As we no longer need a fence for GTT access,
- * we can relinquish it now (and so prevent having
- * to steal a fence from someone else on the next
- * fence request). Note GPU activity would have
- * dropped the fence as all snoopable access is
- * supposed to be linear.
- */
- for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
- }
- } else {
- /* We either have incoherent backing store and
- * so no GTT access or the architecture is fully
- * coherent. In such cases, existing GTT mmaps
- * ignore the cache bit in the PTE and we can
- * rewrite it without confusing the GPU or having
- * to force userspace to fault back in its mmaps.
- */
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
- if (ret)
- return ret;
- }
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true; /* Always invalidate stale cachelines */
-
- return 0;
-}
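/*
 * Editorial sketch: the "goto restart" pattern used above, for walks where an
 * action (here, unbinding) may mutate the list being walked. A hypothetical
 * singly-linked list stands in for obj->vma.list.
 */
struct sketch_node { struct sketch_node *next; int keep; };

static void sketch_drop_all(struct sketch_node **head)
{
	struct sketch_node **p;
restart:
	for (p = head; *p; p = &(*p)->next) {
		if ((*p)->keep)
			continue;
		*p = (*p)->next; /* unlinking may have side effects elsewhere, */
		goto restart;    /* so play safe and restart the iteration */
	}
}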
-
-int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_caching *args = data;
- struct drm_i915_gem_object *obj;
- int err = 0;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, args->handle);
- if (!obj) {
- err = -ENOENT;
- goto out;
- }
-
- switch (obj->cache_level) {
- case I915_CACHE_LLC:
- case I915_CACHE_L3_LLC:
- args->caching = I915_CACHING_CACHED;
- break;
-
- case I915_CACHE_WT:
- args->caching = I915_CACHING_DISPLAY;
- break;
-
- default:
- args->caching = I915_CACHING_NONE;
- break;
- }
-out:
- rcu_read_unlock();
- return err;
-}
-
-int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_caching *args = data;
- struct drm_i915_gem_object *obj;
- enum i915_cache_level level;
- int ret = 0;
-
- switch (args->caching) {
- case I915_CACHING_NONE:
- level = I915_CACHE_NONE;
- break;
- case I915_CACHING_CACHED:
- /*
- * Due to a HW issue on BXT A stepping, GPU stores via a
- * snooped mapping may leave stale data in a corresponding CPU
- * cacheline, whereas normally such cachelines would get
- * invalidated.
- */
- if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
- return -ENODEV;
-
- level = I915_CACHE_LLC;
- break;
- case I915_CACHING_DISPLAY:
- level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
- break;
- default:
- return -EINVAL;
- }
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /*
- * The caching mode of a proxy object is handled by its generator, and
- * is not allowed to be changed by userspace.
- */
- if (i915_gem_object_is_proxy(obj)) {
- ret = -ENXIO;
- goto out;
- }
-
- if (obj->cache_level == level)
- goto out;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_cache_level(obj, level);
- mutex_unlock(&dev->struct_mutex);
-
-out:
- i915_gem_object_put(obj);
- return ret;
-}
-
-/*
- * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
- * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
- * (for pageflips). We only flush the caches while preparing the buffer for
- * display, the callers are responsible for frontbuffer flush.
- */
-struct i915_vma *
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
- const struct i915_ggtt_view *view,
- unsigned int flags)
-{
- struct i915_vma *vma;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
-
- /* The display engine is not coherent with the LLC cache on gen6. As
- * a result, we make sure that the pinning that is about to occur is
- * done with uncached PTEs. This is lowest common denominator for all
- * chipsets.
- *
- * However for gen6+, we could do better by using the GFDT bit instead
- * of uncaching, which would allow us to flush all the LLC-cached data
- * with that bit in the PTE to main memory with just one PIPE_CONTROL.
- */
- ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(to_i915(obj->base.dev)) ?
- I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
-
- /* As the user may map the buffer once pinned in the display plane
- * (e.g. libkms for the bootup splash), we have to ensure that we
- * always use map_and_fenceable for all scanout buffers. However,
- * it may simply be too big to fit into mappable, in which case
- * put it anyway and hope that userspace can cope (but always first
- * try to preserve the existing ABI).
- */
- vma = ERR_PTR(-ENOSPC);
- if ((flags & PIN_MAPPABLE) == 0 &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- flags |
- PIN_MAPPABLE |
- PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- if (IS_ERR(vma))
- goto err_unpin_global;
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- __i915_gem_object_flush_for_display(obj);
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
-
- return vma;
-
-err_unpin_global:
- obj->pin_global--;
- return vma;
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
- if (WARN_ON(vma->obj->pin_global == 0))
- return;
-
- if (--vma->obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
-/**
- * Moves a single object to the CPU read, and possibly write domain.
- * @obj: object to act on
- * @write: requesting write or read-only access
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
-
- /* If we're writing through the CPU, then the GPU read domains will
- * need to be invalidated at next use.
- */
- if (write)
- __start_cpu_write(obj);
-
- return 0;
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * Note that if we were to use the current jiffies each time around the loop,
- * we wouldn't escape the function with any frames outstanding if the time to
- * render a frame was over 20ms.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct i915_request *request, *target = NULL;
- long ret;
-
- /* ABI: return -EIO if already wedged */
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- return ret;
-
- spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
- if (time_after_eq(request->emitted_jiffies, recent_enough))
- break;
-
- if (target) {
- list_del(&target->client_link);
- target->file_priv = NULL;
- }
-
- target = request;
- }
- if (target)
- i915_request_get(target);
- spin_unlock(&file_priv->mm.lock);
-
- if (target == NULL)
- return 0;
-
- ret = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- i915_request_put(target);
-
- return ret < 0 ? ret : 0;
-}
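/*
 * Editorial sketch: the 20 ms throttle window above depends on wrap-safe
 * jiffies arithmetic. This is the signed-difference idiom behind
 * time_after_eq(); the tick rate is an assumption.
 */
#define SKETCH_HZ 100
#define SKETCH_THROTTLE_JIFFIES (20 * SKETCH_HZ / 1000) /* 20 ms in ticks */

static int sketch_time_after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0; /* correct even across jiffies wrap-around */
}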
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -3842,146 +1096,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
-static __always_inline u32 __busy_read_flag(u8 id)
-{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
- return 0xffff0000u;
-
- GEM_BUG_ON(id >= 16);
- return 0x10000u << id;
-}
-
-static __always_inline u32 __busy_write_id(u8 id)
-{
- /*
- * The uABI guarantees an active writer is also amongst the read
- * engines. This would be true if we accessed the activity tracking
- * under the lock, but as we perform the lookup of the object and
- * its activity locklessly we can not guarantee that the last_write
- * being active implies that we have set the same engine flag from
- * last_read - hence we always set both read and write busy for
- * last_write.
- */
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
- return 0xffffffffu;
-
- return (id + 1) | __busy_read_flag(id);
-}
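/*
 * Editorial sketch: the busy-ioctl encoding defined above, as a stand-alone
 * compilable program. The top 16 bits carry one read flag per engine class;
 * the low bits carry the single writer as (class + 1), plus its read flag.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_read_flag(uint8_t id)  { return 0x10000u << id; }
static uint32_t sketch_write_id(uint8_t id)   { return (id + 1) | sketch_read_flag(id); }

int main(void)
{
	/* engine class 2: reader flag is bit 18, writer id is 3 */
	printf("read=%#x write=%#x\n", sketch_read_flag(2), sketch_write_id(2));
	return 0;
}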
-
-static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
-{
- const struct i915_request *rq;
-
- /*
- * We have to check the current hw status of the fence as the uABI
- * guarantees forward progress. We could rely on the idle worker
- * to eventually flush us, but to minimise latency just ask the
- * hardware.
- *
- * Note we only report on the status of native fences.
- */
- if (!dma_fence_is_i915(fence))
- return 0;
-
- /* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, const struct i915_request, fence);
- if (i915_request_completed(rq))
- return 0;
-
- /* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
- return flag(rq->engine->uabi_class);
-}
-
-static __always_inline unsigned int
-busy_check_reader(const struct dma_fence *fence)
-{
- return __busy_set_if_active(fence, __busy_read_flag);
-}
-
-static __always_inline unsigned int
-busy_check_writer(const struct dma_fence *fence)
-{
- if (!fence)
- return 0;
-
- return __busy_set_if_active(fence, __busy_write_id);
-}
-
-int
-i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_busy *args = data;
- struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
- unsigned int seq;
- int err;
-
- err = -ENOENT;
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, args->handle);
- if (!obj)
- goto out;
-
- /*
- * A discrepancy here is that we do not report the status of
- * non-i915 fences, i.e. even though we may report the object as idle,
- * a call to set-domain may still stall waiting for foreign rendering.
- * This also means that wait-ioctl may report an object as busy,
- * where busy-ioctl considers it idle.
- *
- * We trade the ability to warn of foreign fences to report on which
- * i915 engines are active for the object.
- *
- * Alternatively, we can trade that extra information on read/write
- * activity with
- * args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
- * to report the overall busyness. This is what the wait-ioctl does.
- *
- */
-retry:
- seq = raw_read_seqcount(&obj->resv->seq);
-
- /* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
-
- /* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->resv->fence);
- if (list) {
- unsigned int shared_count = list->shared_count, i;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence =
- rcu_dereference(list->shared[i]);
-
- args->busy |= busy_check_reader(fence);
- }
- }
-
- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
- goto retry;
-
- err = 0;
-out:
- rcu_read_unlock();
- return err;
-}
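/*
 * Editorial sketch: the lockless read-and-retry above, expressed as a minimal
 * C11 seqlock reader. The kernel's seqcount_t has more to it (lockdep,
 * preemption handling); this only shows the retry shape.
 */
#include <stdatomic.h>

static atomic_uint sketch_seq;

static unsigned int sketch_read_begin(void)
{
	unsigned int s;
	do {
		s = atomic_load_explicit(&sketch_seq, memory_order_acquire);
	} while (s & 1); /* an odd count means a writer is mid-update */
	return s;
}

static int sketch_read_retry(unsigned int s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&sketch_seq, memory_order_relaxed) != s;
}

/* usage: do { s = sketch_read_begin(); snapshot(); } while (sketch_read_retry(s)); */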
-
-int
-i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return i915_gem_ring_throttle(dev, file_priv);
-}
-
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int err;
@@ -4004,7 +1123,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
@@ -4020,6 +1139,24 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->mm.madv != __I915_MADV_PURGED)
obj->mm.madv = args->madv;
+ if (i915_gem_object_has_pages(obj)) {
+ struct list_head *list;
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &i915->mm.purge_list;
+ else
+ list = &i915->mm.shrink_list;
+ list_move_tail(&obj->mm.link, list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+ }
+
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
@@ -4033,355 +1170,13 @@ out:
return err;
}
-static void
-frontbuffer_retire(struct i915_active_request *active,
- struct i915_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, typeof(*obj), frontbuffer_write);
-
- intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
-{
- mutex_init(&obj->mm.lock);
-
- spin_lock_init(&obj->vma.lock);
- INIT_LIST_HEAD(&obj->vma.list);
-
- INIT_LIST_HEAD(&obj->lut_list);
- INIT_LIST_HEAD(&obj->batch_pool_link);
-
- init_rcu_head(&obj->rcu);
-
- obj->ops = ops;
-
- reservation_object_init(&obj->__builtin_resv);
- obj->resv = &obj->__builtin_resv;
-
- obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- i915_active_request_init(&obj->frontbuffer_write,
- NULL, frontbuffer_retire);
-
- obj->mm.madv = I915_MADV_WILLNEED;
- INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
- mutex_init(&obj->mm.get_page.lock);
-
- i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
-
- .get_pages = i915_gem_object_get_pages_gtt,
- .put_pages = i915_gem_object_put_pages_gtt,
-
- .pwrite = i915_gem_object_pwrite_gtt,
-};
-
-static int i915_gem_object_create_shmem(struct drm_device *dev,
- struct drm_gem_object *obj,
- size_t size)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- unsigned long flags = VM_NORESERVE;
- struct file *filp;
-
- drm_gem_private_object_init(dev, obj, size);
-
- if (i915->mm.gemfs)
- filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
- flags);
- else
- filp = shmem_file_setup("i915", size, flags);
-
- if (IS_ERR(filp))
- return PTR_ERR(filp);
-
- obj->filp = filp;
-
- return 0;
-}
-
-struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
-{
- struct drm_i915_gem_object *obj;
- struct address_space *mapping;
- unsigned int cache_level;
- gfp_t mask;
- int ret;
-
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
-
- ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
- if (ret)
- goto fail;
-
- mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
- /* 965gm cannot relocate objects above 4GiB. */
- mask &= ~__GFP_HIGHMEM;
- mask |= __GFP_DMA32;
- }
-
- mapping = obj->base.filp->f_mapping;
- mapping_set_gfp_mask(mapping, mask);
- GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
-
- i915_gem_object_init(obj, &i915_gem_object_ops);
-
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
-
- if (HAS_LLC(dev_priv))
- /* On some devices, we can have the GPU use the LLC (the CPU
- * cache) for about a 10% performance improvement
- * compared to uncached. Graphics requests other than
- * display scanout are coherent with the CPU in
- * accessing this cache. This means in this mode we
- * don't need to clflush on the CPU side, and on the
- * GPU side we only need to flush internal caches to
- * get data visible to the CPU.
- *
- * However, we maintain the display planes as UC, and so
- * need to rebind when first used as such.
- */
- cache_level = I915_CACHE_LLC;
- else
- cache_level = I915_CACHE_NONE;
-
- i915_gem_object_set_cache_coherency(obj, cache_level);
-
- trace_i915_gem_object_create(obj);
-
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
-}
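/*
 * Editorial sketch: the two size checks at the top of object creation above,
 * as plain C. The page-shift value is an assumption for the sketch; the
 * second check mirrors overflows_type() against obj->base.size (a size_t).
 */
#include <stdint.h>
#include <limits.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12

static int sketch_object_size_ok(uint64_t size)
{
	if (size >> SKETCH_PAGE_SHIFT > INT_MAX)
		return 0; /* page count is widely assumed to fit a signed int */
	if (size != (size_t)size)
		return 0; /* would overflow the object's size field */
	return 1;
}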
-
-static bool discard_backing_storage(struct drm_i915_gem_object *obj)
-{
- /* If we are the last user of the backing storage (be it shmemfs
- * pages or stolen etc), we know that the pages are going to be
- * immediately released. In this case, we can then skip copying
- * back the contents from the GPU.
- */
-
- if (obj->mm.madv != I915_MADV_WILLNEED)
- return false;
-
- if (obj->base.filp == NULL)
- return true;
-
- /* At first glance, this looks racy, but then again so would be
- * userspace racing mmap against close. However, the first external
- * reference to the filp can only be obtained through the
- * i915_gem_mmap_ioctl() which safeguards us against the user
- * acquiring such a reference whilst we are in the middle of
- * freeing the object.
- */
- return file_count(obj->base.filp) == 1;
-}
-
-static void __i915_gem_free_objects(struct drm_i915_private *i915,
- struct llist_node *freed)
-{
- struct drm_i915_gem_object *obj, *on;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(i915);
- llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
- trace_i915_gem_object_destroy(obj);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- GEM_BUG_ON(i915_gem_object_is_active(obj));
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
- }
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- /* This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- if (i915_gem_object_has_pages(obj)) {
- spin_lock(&i915->mm.obj_lock);
- list_del_init(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- GEM_BUG_ON(obj->bind_count);
- GEM_BUG_ON(obj->userfault_count);
- GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
- GEM_BUG_ON(!list_empty(&obj->lut_list));
-
- if (obj->ops->release)
- obj->ops->release(obj);
-
- if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
- atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
-
- reservation_object_fini(&obj->__builtin_resv);
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(i915, obj->base.size);
-
- bitmap_free(obj->bit_17);
- i915_gem_object_free(obj);
-
- GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
- atomic_dec(&i915->mm.free_count);
-
- if (on)
- cond_resched();
- }
- intel_runtime_pm_put(i915, wakeref);
-}
-
-static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
-{
- struct llist_node *freed;
-
- /* Free the oldest, most stale object to keep the free_list short */
- freed = NULL;
- if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
- /* Only one consumer of llist_del_first() allowed */
- spin_lock(&i915->mm.free_lock);
- freed = llist_del_first(&i915->mm.free_list);
- spin_unlock(&i915->mm.free_lock);
- }
- if (unlikely(freed)) {
- freed->next = NULL;
- __i915_gem_free_objects(i915, freed);
- }
-}
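/*
 * Editorial sketch: why llist_del_first() allows only one consumer. Producers
 * may push lock-free, but popping a single node is only ABA-safe when
 * consumers are serialised (here, by the spinlock in the code above). C11
 * atomics stand in for the kernel's llist.
 */
#include <stdatomic.h>
#include <stddef.h>

struct sketch_lnode { struct sketch_lnode *next; };
static _Atomic(struct sketch_lnode *) sketch_list_head;

static void sketch_llist_push(struct sketch_lnode *n) /* any number of producers */
{
	struct sketch_lnode *h = atomic_load(&sketch_list_head);
	do {
		n->next = h;
	} while (!atomic_compare_exchange_weak(&sketch_list_head, &h, n));
}

static struct sketch_lnode *sketch_llist_pop(void)    /* exactly one consumer */
{
	struct sketch_lnode *h = atomic_load(&sketch_list_head);
	while (h && !atomic_compare_exchange_weak(&sketch_list_head, &h, h->next))
		; /* safe only because no other thread can free or repush h */
	return h;
}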
-
-static void __i915_gem_free_work(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work);
- struct llist_node *freed;
-
- /*
- * All file-owned VMA should have been released by this point through
- * i915_gem_close_object(), or earlier by i915_gem_context_close().
- * However, the object may also be bound into the global GTT (e.g.
- * older GPUs without per-process support, or for direct access through
- * the GTT either for the user or for scanout). Those VMA still need to be
- * unbound now.
- */
-
- spin_lock(&i915->mm.free_lock);
- while ((freed = llist_del_all(&i915->mm.free_list))) {
- spin_unlock(&i915->mm.free_lock);
-
- __i915_gem_free_objects(i915, freed);
- if (need_resched())
- return;
-
- spin_lock(&i915->mm.free_lock);
- }
- spin_unlock(&i915->mm.free_lock);
-}
-
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
-{
- struct drm_i915_gem_object *obj =
- container_of(head, typeof(*obj), rcu);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- /*
- * We reuse obj->rcu for the freed list, so we had better not treat
- * it like a rcu_head from this point forwards. And we expect all
- * objects to be freed via this path.
- */
- destroy_rcu_head(&obj->rcu);
-
- /*
- * Since we require blocking on struct_mutex to unbind the freed
- * object from the GPU before releasing resources back to the
- * system, we cannot do that directly from the RCU callback (which may
- * be a softirq context), but must instead defer that work onto a
- * kthread. We use the RCU callback rather than move the freed object
- * directly onto the work queue so that we can mix between using the
- * worker and performing frees directly from subsequent allocations for
- * crude but effective memory throttling.
- */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_work(i915->wq, &i915->mm.free_work);
-}
-
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- if (obj->mm.quirked)
- __i915_gem_object_unpin_pages(obj);
-
- if (discard_backing_storage(obj))
- obj->mm.madv = I915_MADV_DONTNEED;
-
- /*
- * Before we free the object, make sure any pure RCU-only
- * read-side critical sections are complete, e.g.
- * i915_gem_busy_ioctl(). For the corresponding synchronized
- * lookup see i915_gem_object_lookup_rcu().
- */
- atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- if (!i915_gem_object_has_active_reference(obj) &&
- i915_gem_object_is_active(obj))
- i915_gem_object_set_active_reference(obj);
- else
- i915_gem_object_put(obj);
-}
-
void i915_gem_sanitize(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
@@ -4401,141 +1196,10 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_engines_sanitize(i915, false);
-
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915, wakeref);
-
- mutex_lock(&i915->drm.struct_mutex);
- i915_gem_contexts_lost(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-void i915_gem_suspend(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(i915);
-
- flush_workqueue(i915->wq);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- /*
- * We have to flush all the executing contexts to main memory so
- * that they can be saved in the hibernation image. To ensure the last
- * context image is coherent, we have to switch away from it. That
- * leaves the i915->kernel_context still active when
- * we actually suspend, and its image in memory may not match the GPU
- * state. Fortunately, the kernel_context is disposable and we do
- * not rely on its state.
- */
- switch_to_kernel_context_sync(i915, i915->gt.active_engines);
-
- mutex_unlock(&i915->drm.struct_mutex);
- i915_reset_flush(i915);
-
- drain_delayed_work(&i915->gt.retire_work);
-
- /*
- * As the idle_work is rearming if it detects a race, play safe and
- * repeat the flush until it is definitely idle.
- */
- drain_delayed_work(&i915->gt.idle_work);
-
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
-
- intel_uc_suspend(i915);
-
- intel_runtime_pm_put(i915, wakeref);
-}
-
-void i915_gem_suspend_late(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- struct list_head *phases[] = {
- &i915->mm.unbound_list,
- &i915->mm.bound_list,
- NULL
- }, **phase;
-
- /*
- * Neither the BIOS, ourselves nor any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload and suspend. Afterwards we then
- * clean up the GEM state tracking, flushing off the requests and
- * leaving the system in a known idle state.
- *
- * Note that it is of the utmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that resetting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link)
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- }
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_gt_sanitize(i915, false);
- intel_uc_sanitize(i915);
- i915_gem_sanitize(i915);
-}
-
-void i915_gem_resume(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- WARN_ON(i915->gt.awake);
-
- mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
-
- /*
- * As we didn't flush the kernel context before suspend, we cannot
- * guarantee that the context image is complete. So let's just reset
- * it and start again.
- */
- intel_gt_resume(i915);
-
- if (i915_gem_init_hw(i915))
- goto err_wedged;
-
- intel_uc_resume(i915);
-
- /* Always reload a context for powersaving. */
- if (!load_power_context(i915))
- goto err_wedged;
-
-out_unlock:
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
- return;
-
-err_wedged:
- if (!i915_reset_failed(i915)) {
- dev_err(i915->drm.dev,
- "Failed to re-initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(i915);
- }
- goto out_unlock;
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
@@ -4586,27 +1250,6 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
}
-static int __i915_gem_restart_engines(void *data)
-{
- struct drm_i915_private *i915 = data;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- for_each_engine(engine, i915, id) {
- err = engine->init_hw(engine);
- if (err) {
- DRM_ERROR("Failed to restart %s (%d)\n",
- engine->name, err);
- return err;
- }
- }
-
- intel_engines_set_scheduler_caps(i915);
-
- return 0;
-}
-
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
int ret;
@@ -4665,12 +1308,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised, can we replay the requests */
- ret = __i915_gem_restart_engines(dev_priv);
+ ret = intel_engines_resume(dev_priv);
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engines_set_scheduler_caps(dev_priv);
return 0;
cleanup_uc:
@@ -4683,8 +1327,9 @@ out:
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
enum intel_engine_id id;
int err = 0;
@@ -4701,18 +1346,21 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ e = i915_gem_context_lock_engines(ctx);
+
for_each_engine(engine, i915, id) {
+ struct intel_context *ce = e->engines[id];
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_ctx;
+ goto err_active;
}
err = 0;
- if (engine->init_context)
- err = engine->init_context(rq);
+ if (rq->engine->init_context)
+ err = rq->engine->init_context(rq);
i915_request_add(rq);
if (err)
@@ -4720,21 +1368,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
}
/* Flush the default context image to memory, and enable powersaving. */
- if (!load_power_context(i915)) {
+ if (!i915_gem_load_power_context(i915)) {
err = -EIO;
goto err_active;
}
for_each_engine(engine, i915, id) {
- struct intel_context *ce;
- struct i915_vma *state;
+ struct intel_context *ce = e->engines[id];
+ struct i915_vma *state = ce->state;
void *vaddr;
- ce = intel_context_lookup(ctx, engine);
- if (!ce)
- continue;
-
- state = ce->state;
if (!state)
continue;
@@ -4752,7 +1395,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
+ i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ i915_gem_object_unlock(state->obj);
if (err)
goto err_active;
@@ -4790,6 +1435,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
}
out_ctx:
+ i915_gem_context_unlock_engines(ctx);
i915_gem_context_set_closed(ctx);
i915_gem_context_put(ctx);
return err;
@@ -4842,6 +1488,23 @@ static void i915_gem_fini_scratch(struct drm_i915_private *i915)
i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
+static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, i915, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ return err;
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -4853,11 +1516,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
- dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- else
- dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-
i915_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
@@ -4894,6 +1552,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_ggtt;
}
+ ret = intel_engines_setup(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_unlock;
+ }
+
ret = i915_gem_contexts_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -4927,6 +1591,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
+ ret = intel_engines_verify_workarounds(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
goto err_init_hw;
@@ -4955,6 +1623,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_set_wedged(dev_priv);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
@@ -4967,7 +1636,7 @@ err_uc_init:
err_pm:
if (ret != -EIO) {
intel_cleanup_gt_powersave(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
+ intel_engines_cleanup(dev_priv);
}
err_context:
if (ret != -EIO)
@@ -5014,8 +1683,12 @@ err_uc_misc:
return ret;
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
+ GEM_BUG_ON(dev_priv->gt.awake);
+
+ intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+
i915_gem_suspend_late(dev_priv);
intel_disable_gt_powersave(dev_priv);
@@ -5025,7 +1698,15 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(dev_priv);
+}
+
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_engines_cleanup(dev_priv);
i915_gem_contexts_fini(dev_priv);
i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -5048,77 +1729,32 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
i915_gem_sanitize(i915);
}
-void
-i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- dev_priv->gt.cleanup_engine(engine);
-}
-
-void
-i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
- dev_priv->num_fence_regs = 32;
- else if (INTEL_GEN(dev_priv) >= 4 ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- if (intel_vgpu_active(dev_priv))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
-
- /* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- fence->i915 = dev_priv;
- fence->id = i;
- list_add_tail(&fence->link, &dev_priv->mm.fence_list);
- }
- i915_gem_restore_fences(dev_priv);
-
- i915_gem_detect_bit_6_swizzle(dev_priv);
-}
-
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
- spin_lock_init(&i915->mm.object_stat_lock);
spin_lock_init(&i915->mm.obj_lock);
spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
- INIT_LIST_HEAD(&i915->mm.unbound_list);
- INIT_LIST_HEAD(&i915->mm.bound_list);
- INIT_LIST_HEAD(&i915->mm.fence_list);
- INIT_LIST_HEAD(&i915->mm.userfault_list);
+ INIT_LIST_HEAD(&i915->mm.purge_list);
+ INIT_LIST_HEAD(&i915->mm.shrink_list);
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ i915_gem_init__objects(i915);
}
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err;
+ intel_gt_pm_init(dev_priv);
+
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
+ spin_lock_init(&dev_priv->gt.closed_lock);
i915_gem_init__mm(dev_priv);
+ i915_gem_init__pm(dev_priv);
- INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
- i915_gem_retire_work_handler);
- INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
- i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
@@ -5140,7 +1776,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- WARN_ON(dev_priv->mm.object_count);
+ WARN_ON(dev_priv->mm.shrink_count);
cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
@@ -5160,11 +1796,7 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
- struct list_head *phases[] = {
- &i915->mm.unbound_list,
- &i915->mm.bound_list,
- NULL
- }, **phase;
+ intel_wakeref_t wakeref;
/*
* Called just before we write the hibernation image.
@@ -5181,15 +1813,18 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link)
- WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+ i915_gem_object_lock(obj);
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -5270,276 +1905,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Allocate a new GEM object and fill it with the supplied data */
-struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
-{
- struct drm_i915_gem_object *obj;
- struct file *file;
- size_t offset;
- int err;
-
- obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
- if (IS_ERR(obj))
- return obj;
-
- GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
-
- file = obj->base.filp;
- offset = 0;
- do {
- unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct page *page;
- void *pgdata, *vaddr;
-
- err = pagecache_write_begin(file, file->f_mapping,
- offset, len, 0,
- &page, &pgdata);
- if (err < 0)
- goto fail;
-
- vaddr = kmap(page);
- memcpy(vaddr, data, len);
- kunmap(page);
-
- err = pagecache_write_end(file, file->f_mapping,
- offset, len, len,
- page, pgdata);
- if (err < 0)
- goto fail;
-
- size -= len;
- data += len;
- offset += len;
- } while (size);
-
- return obj;
-
-fail:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
-{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
- struct scatterlist *sg;
- unsigned int idx, count;
-
- might_sleep();
- GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- /* As we iterate forward through the sg, we record each entry in a
- * radixtree for quick repeated (backwards) lookups. If we have seen
- * this index previously, we will have an entry for it.
- *
- * Initial lookup is O(N), but this is amortized to O(1) for
- * sequential page access (where each new request is consecutive
- * to the previous one). Repeated lookups are O(lg(obj->base.size)),
- * i.e. O(1) with a large constant!
- */
- if (n < READ_ONCE(iter->sg_idx))
- goto lookup;
-
- mutex_lock(&iter->lock);
-
- /* We prefer to reuse the last sg so that repeated lookup of this
- * (or the subsequent) sg are fast - comparing against the last
- * sg is faster than going through the radixtree.
- */
-
- sg = iter->sg_pos;
- idx = iter->sg_idx;
- count = __sg_page_count(sg);
-
- while (idx + count <= n) {
- void *entry;
- unsigned long i;
- int ret;
-
- /* If we cannot allocate and insert this entry, or the
- * individual pages from this range, cancel updating the
- * sg_idx so that on this lookup we are forced to linearly
- * scan onwards, but on future lookups we will try the
- * insertion again (in which case we need to be careful of
- * the error return reporting that we have already inserted
- * this index).
- */
- ret = radix_tree_insert(&iter->radix, idx, sg);
- if (ret && ret != -EEXIST)
- goto scan;
-
- entry = xa_mk_value(idx);
- for (i = 1; i < count; i++) {
- ret = radix_tree_insert(&iter->radix, idx + i, entry);
- if (ret && ret != -EEXIST)
- goto scan;
- }
-
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
-scan:
- iter->sg_pos = sg;
- iter->sg_idx = idx;
-
- mutex_unlock(&iter->lock);
-
- if (unlikely(n < idx)) /* insertion completed by another thread */
- goto lookup;
-
- /* In case we failed to insert the entry into the radixtree, we need
- * to look beyond the current sg.
- */
- while (idx + count <= n) {
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
- *offset = n - idx;
- return sg;
-
-lookup:
- rcu_read_lock();
-
- sg = radix_tree_lookup(&iter->radix, n);
- GEM_BUG_ON(!sg);
-
- * If this index is in the middle of a multi-page sg entry,
- * the radix tree will contain a value entry that points
- * to the start of that range. We will return the pointer to
- * the base page and the offset of this page within the
- * sg entry's range.
- */
- *offset = 0;
- if (unlikely(xa_is_value(sg))) {
- unsigned long base = xa_to_value(sg);
-
- sg = radix_tree_lookup(&iter->radix, base);
- GEM_BUG_ON(!sg);
-
- *offset = n - base;
- }
-
- rcu_read_unlock();
-
- return sg;
-}
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
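/*
 * Editorial sketch: the address computation above. Given the sg entry that
 * covers page n (with the entry starting at page index idx), the page's bus
 * address is the entry's DMA base plus the page offset within the entry.
 * The page-shift value is an assumption for the sketch.
 */
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12

static uint64_t sketch_page_dma_address(uint64_t sg_dma_base,
					unsigned long idx, unsigned long n)
{
	return sg_dma_base + ((uint64_t)(n - idx) << SKETCH_PAGE_SHIFT);
}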
-
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
-{
- struct sg_table *pages;
- int err;
-
- if (align > obj->base.size)
- return -EINVAL;
-
- if (obj->ops == &i915_gem_phys_ops)
- return 0;
-
- if (obj->ops != &i915_gem_object_ops)
- return -EINVAL;
-
- err = i915_gem_object_unbind(obj);
- if (err)
- return err;
-
- mutex_lock(&obj->mm.lock);
-
- if (obj->mm.madv != I915_MADV_WILLNEED) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (obj->mm.quirked) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (obj->mm.mapping) {
- err = -EBUSY;
- goto err_unlock;
- }
-
- pages = __i915_gem_object_unset_pages(obj);
-
- obj->ops = &i915_gem_phys_ops;
-
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_xfer;
-
- /* Perma-pin (until release) the physical set of pages */
- __i915_gem_object_pin_pages(obj);
-
- if (!IS_ERR_OR_NULL(pages))
- i915_gem_object_ops.put_pages(obj, pages);
- mutex_unlock(&obj->mm.lock);
- return 0;
-
-err_xfer:
- obj->ops = &i915_gem_object_ops;
- if (!IS_ERR_OR_NULL(pages)) {
- unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
-
- __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
- }
-err_unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
-#include "selftests/huge_gem_object.c"
-#include "selftests/huge_pages.c"
-#include "selftests/i915_gem_object.c"
-#include "selftests/i915_gem_coherency.c"
#include "selftests/i915_gem.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 9074eb1e843f..fe82d3571072 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -75,9 +75,6 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
-void i915_gem_park(struct drm_i915_private *i915);
-void i915_gem_unpark(struct drm_i915_private *i915);
-
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
@@ -94,4 +91,9 @@ static inline bool __tasklet_enable(struct tasklet_struct *t)
return atomic_dec_and_test(&t->count);
}
+static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_SCHED, &t->state);
+}
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index f3890b664e3f..25a3e4d09a2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -55,7 +55,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -96,7 +96,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->resv;
+ struct reservation_object *resv = obj->base.resv;
if (!reservation_object_test_signaled_rcu(resv, true))
break;
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
}
}
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
true));
if (obj->base.size >= size)
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 56947daaaf65..feeeeeaa54d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
+struct drm_i915_gem_object;
struct intel_engine_cs;
struct i915_gem_batch_pool {
@@ -19,7 +20,7 @@ struct i915_gem_batch_pool {
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
struct intel_engine_cs *engine);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object*
+struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
deleted file mode 100644
index f390247561b3..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEM_CLFLUSH_H__
-#define __I915_GEM_CLFLUSH_H__
-
-struct drm_i915_private;
-struct drm_i915_gem_object;
-
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- unsigned int flags);
-#define I915_CLFLUSH_FORCE BIT(0)
-#define I915_CLFLUSH_SYNC BIT(1)
-
-#endif /* __I915_GEM_CLFLUSH_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 060f5903544a..a5783c4cb98b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -28,6 +28,8 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
@@ -36,15 +38,8 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static bool ggtt_is_idle(struct drm_i915_private *i915)
-{
- return !i915->gt.active_requests;
-}
-
static int ggtt_flush(struct drm_i915_private *i915)
{
- int err;
-
/*
* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
@@ -52,19 +47,10 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
- if (err)
- return err;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
-
- GEM_BUG_ON(!ggtt_is_idle(i915));
- return 0;
+ return i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -222,24 +208,17 @@ search_again:
* us a termination condition, when the last retired context is
	 * the kernel's, there is no more we can evict.
*/
- if (!ggtt_is_idle(dev_priv)) {
- if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
- return -EBUSY;
+ if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+ return -EBUSY;
- ret = ggtt_flush(dev_priv);
- if (ret)
- return ret;
+ ret = ggtt_flush(dev_priv);
+ if (ret)
+ return ret;
- cond_resched();
- goto search_again;
- }
+ cond_resched();
- /*
- * If we still have pending pageflip completions, drop
- * back to userspace to give our workqueues time to
- * acquire our locks and unpin the old scanouts.
- */
- return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
+ flags |= PIN_NONBLOCK;
+ goto search_again;
found:
	/* drm_mm doesn't allow any other operations while
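Editor's note: with this hunk the eviction path no longer bails out with -ENOSPC/-EAGAIN when the GGTT is busy; it idles the GPU, yields, and rescans with PIN_NONBLOCK so the next pass skips anything still pinned. A bounded sketch of that shape; scan_and_evict() and flush_and_idle() are illustrative stubs, not the driver's API:

	#include <errno.h>

	#define PIN_NONBLOCK	(1u << 0)

	static int scan_and_evict(unsigned int flags)
	{
		(void)flags;
		return -ENOSPC;		/* stub: pretend the scan found nothing */
	}

	static int flush_and_idle(void)
	{
		return 0;		/* stub for i915_gem_wait_for_idle() */
	}

	static int evict_something(unsigned int flags)
	{
		for (int pass = 0; pass < 2; pass++) {
			int err;

			if (scan_and_evict(flags) == 0)
				return 0;

			err = flush_and_idle();	/* retire everything outstanding */
			if (err)
				return err;

			flags |= PIN_NONBLOCK;	/* next pass: skip pinned nodes */
		}
		return -ENOSPC;
	}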
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 3084f52e3372..0bf53ac1c835 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -22,7 +22,10 @@
*/
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
/**
* DOC: fence register handling
@@ -56,7 +59,7 @@
#define pipelined 0
-static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
@@ -92,9 +95,10 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
- /* To w/a incoherency with non-atomic 64-bit register updates,
+ /*
+ * To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
	 * precede the update with a write to turn off the fence register,
@@ -103,16 +107,16 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write_fw(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(val));
- I915_WRITE(fence_reg_lo, lower_32_bits(val));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
+ intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
}
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i915_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
@@ -144,15 +148,15 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
i915_reg_t reg = FENCE_REG(fence->id);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
}
}
-static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i830_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
@@ -176,18 +180,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
i915_reg_t reg = FENCE_REG(fence->id);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
}
}
-static void fence_write(struct drm_i915_fence_reg *fence,
+static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
- /* Previous access through the fence register is marshalled by
+ /*
+ * Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
@@ -199,14 +204,15 @@ static void fence_write(struct drm_i915_fence_reg *fence,
else
i965_write_fence_reg(fence, vma);
- /* Access through the fenced region afterwards is
+ /*
+ * Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
fence->dirty = false;
}
-static int fence_update(struct drm_i915_fence_reg *fence,
+static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
@@ -251,7 +257,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->mm.fence_list);
+ list_move(&fence->link, &fence->i915->ggtt.fence_list);
}
/*
@@ -264,7 +270,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -275,10 +281,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->mm.fence_list);
+ list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
}
- intel_runtime_pm_put(fence->i915, wakeref);
+ intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
return 0;
}
@@ -295,7 +301,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
*/
int i915_vma_put_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence = vma->fence;
+ struct i915_fence_reg *fence = vma->fence;
if (!fence)
return 0;
@@ -306,11 +312,11 @@ int i915_vma_put_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
+static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->pin_count)
@@ -320,7 +326,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev_priv))
+ if (intel_has_pending_fb_unpin(i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
@@ -344,17 +350,17 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
- /* Note that we revoke fences on runtime suspend. Therefore the user
+ /*
+ * Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(vma->vm->i915);
+ assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -363,7 +369,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
fence->pin_count++;
if (!fence->dirty) {
list_move_tail(&fence->link,
- &fence->i915->mm.fence_list);
+ &fence->i915->ggtt.fence_list);
return 0;
}
} else if (set) {
@@ -393,28 +399,27 @@ out_unpin:
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
*/
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv)
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(dev_priv);
+ fence = fence_find(i915);
if (IS_ERR(fence))
return fence;
@@ -435,28 +440,28 @@ i915_reserve_fence(struct drm_i915_private *dev_priv)
*
 * This function adds a reserved fence register from vGPU to the fence_list.
*/
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
+void i915_unreserve_fence(struct i915_fence_reg *fence)
{
lockdep_assert_held(&fence->i915->drm.struct_mutex);
- list_add(&fence->link, &fence->i915->mm.fence_list);
+ list_add(&fence->link, &fence->i915->ggtt.fence_list);
}
/**
* i915_gem_restore_fences - restore fence state
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
+void i915_gem_restore_fences(struct drm_i915_private *i915)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -523,18 +528,18 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-void
-i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
+static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
+ if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -544,9 +549,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_GEN(dev_priv) >= 6) {
- if (dev_priv->preserve_bios_swizzle) {
- if (I915_READ(DISP_ARB_CTL) &
+ } else if (INTEL_GEN(i915) >= 6) {
+ if (i915->preserve_bios_swizzle) {
+ if (intel_uncore_read(uncore, DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -556,15 +561,17 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
} else {
u32 dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
+ dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated
+ /*
+ * Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
- * makes sense for 2 channels anyway. */
+ * makes sense for 2 channels anyway.
+ */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -573,20 +580,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN(dev_priv, 5)) {
- /* On Ironlake whatever DRAM config, GPU always do
+ } else if (IS_GEN(i915, 5)) {
+ /*
+	 * On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN(dev_priv, 2)) {
- /* As far as we know, the 865 doesn't have these bit 6
+ } else if (IS_GEN(i915, 2)) {
+ /*
+ * As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
- /* The 965, G33, and newer, have a very flexible memory
+ } else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
+ /*
+ * The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
@@ -612,14 +622,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+ if (intel_uncore_read(uncore, C0DRB3) ==
+ intel_uncore_read(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
} else {
- u32 dcc;
+ u32 dcc = intel_uncore_read(uncore, DCC);
- /* On 9xx chipsets, channel interleave by the CPU is
+ /*
+ * On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -627,7 +639,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
- dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
@@ -636,7 +647,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
+ /*
+ * This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
@@ -654,8 +666,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN(dev_priv, 4) &&
- !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ if (IS_GEN(i915, 4) &&
+ !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
@@ -670,7 +682,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
- /* Userspace likes to explode if it sees unknown swizzling,
+ /*
+ * Userspace likes to explode if it sees unknown swizzling,
* so lie. We will finish the lie when reporting through
* the get-tiling-ioctl by reporting the physical swizzle
* mode as unknown instead.
@@ -679,13 +692,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+ i915->mm.bit_6_swizzle_x = swizzle_x;
+ i915->mm.bit_6_swizzle_y = swizzle_y;
}
/*
@@ -693,8 +706,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void
-i915_gem_swizzle_page(struct page *page)
+static void i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
@@ -783,3 +795,42 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
i++;
}
}
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ int num_fences;
+ int i;
+
+ INIT_LIST_HEAD(&ggtt->fence_list);
+ INIT_LIST_HEAD(&ggtt->userfault_list);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+
+ detect_bit_6_swizzle(i915);
+
+ if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ num_fences = 32;
+ else if (INTEL_GEN(i915) >= 4 ||
+ IS_I945G(i915) || IS_I945GM(i915) ||
+ IS_G33(i915) || IS_PINEVIEW(i915))
+ num_fences = 16;
+ else
+ num_fences = 8;
+
+ if (intel_vgpu_active(i915))
+ num_fences = intel_uncore_read(&i915->uncore,
+ vgtif_reg(avail_rs.fence_num));
+
+ /* Initialize fence registers to zero */
+ for (i = 0; i < num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ fence->i915 = i915;
+ fence->id = i;
+ list_add_tail(&fence->link, &ggtt->fence_list);
+ }
+ ggtt->num_fences = num_fences;
+
+ i915_gem_restore_fences(i915);
+}
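Editor's note: the comments above spell out the usage contract for fences: they are revoked on runtime suspend (and restored by i915_gem_restore_fences() on resume), so a user must hold a runtime-PM wakeref for as long as it accesses memory through the fenced region. A sketch of that contract using stub stand-ins rather than the driver's real entry points:

	#include <errno.h>

	struct pm  { int wakerefs; };
	struct vma { int fence_pinned; };

	static int  pm_get(struct pm *pm) { return ++pm->wakerefs; }	/* stand-in */
	static void pm_put(struct pm *pm, int wakeref) { (void)wakeref; --pm->wakerefs; }

	static int  fence_pin(struct vma *v)   { v->fence_pinned++; return 0; }
	static void fence_unpin(struct vma *v) { v->fence_pinned--; }

	static int use_tiled_mapping(struct pm *pm, struct vma *v)
	{
		int wakeref = pm_get(pm);	/* device awake: fence stays valid */
		int err = fence_pin(v);		/* cf. i915_vma_pin_fence() */

		if (!err) {
			/* ... CPU access through the fenced (detiled) range ... */
			fence_unpin(v);
		}

		pm_put(pm, wakeref);	/* only now may runtime suspend revoke it */
		return err;
	}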
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 09dcaf14121b..d2da98828179 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -26,13 +26,17 @@
#define __I915_FENCE_REG_H__
#include <linux/list.h>
+#include <linux/types.h>
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_ggtt;
struct i915_vma;
+struct sg_table;
#define I965_FENCE_PAGE 4096UL
-struct drm_i915_fence_reg {
+struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
@@ -49,4 +53,17 @@ struct drm_i915_fence_reg {
bool dirty;
};
+/* i915_gem_fence_reg.c */
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+void i915_unreserve_fence(struct i915_fence_reg *fence);
+
+void i915_gem_restore_fences(struct drm_i915_private *i915);
+
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8f460cc4cc1f..8ab820145ea6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,12 +35,13 @@
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
+
#include "i915_drv.h"
-#include "i915_vgpu.h"
-#include "i915_reset.h"
+#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -108,22 +109,26 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
+
/*
* Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
-static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void guc_ggtt_invalidate(struct drm_i915_private *i915)
{
- gen6_ggtt_invalidate(dev_priv);
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ struct intel_uncore *uncore = &i915->uncore;
+
+ gen6_ggtt_invalidate(i915);
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
{
intel_gtt_chipset_flush();
}
@@ -341,11 +346,11 @@ static struct page *stash_pop_page(struct pagestash *stash)
static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
- int nr;
+ unsigned int nr;
spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
- nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
memcpy(stash->pvec.pages + stash->pvec.nr,
pvec->pages + pvec->nr - nr,
sizeof(pvec->pages[0]) * nr);
@@ -399,7 +404,8 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
page = stack.pages[--stack.nr];
/* Merge spare WC pages to the global stash */
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+ if (stack.nr)
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
/* Push any surplus WC pages onto the local VM stash */
if (stack.nr)
@@ -469,13 +475,17 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
*/
might_sleep();
spin_lock(&vm->free_pages.lock);
- if (!pagevec_add(&vm->free_pages.pvec, page))
+ while (!pagevec_space(&vm->free_pages.pvec))
vm_free_pages_release(vm, false);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+ pagevec_add(&vm->free_pages.pvec, page);
spin_unlock(&vm->free_pages.lock);
}
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
+ kref_init(&vm->ref);
+
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
* Do a dummy acquire now under fs_reclaim so that any allocation
@@ -652,7 +662,8 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
return ERR_PTR(-ENOMEM);
}
- pt->used_ptes = 0;
+ atomic_set(&pt->used, 0);
+
return pt;
}
@@ -674,117 +685,71 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
fill32_px(vm, pt, vm->scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+static struct i915_page_directory *__alloc_pd(void)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+ pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+
if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
+ return NULL;
- if (unlikely(setup_px(vm, pd))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
+ memset(&pd->base, 0, sizeof(pd->base));
+ atomic_set(&pd->used, 0);
+ spin_lock_init(&pd->lock);
- pd->used_pdes = 0;
- return pd;
-}
+ /* for safety */
+ pd->entry[0] = NULL;
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- cleanup_px(vm, pd);
- kfree(pd);
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- fill_px(vm, pd,
- gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
+ return pd;
}
-static int __pdp_init(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
+ struct i915_page_directory *pd;
- pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- I915_GFP_ALLOW_FAIL);
- if (unlikely(!pdp->page_directory))
- return -ENOMEM;
+ pd = __alloc_pd();
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
- memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
+ if (unlikely(setup_px(vm, pd))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
- return 0;
+ return pd;
}
-static void __pdp_fini(struct i915_page_directory_pointer *pdp)
+static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
{
- kfree(pdp->page_directory);
- pdp->page_directory = NULL;
+ return pd->base.page;
}
-static struct i915_page_directory_pointer *
-alloc_pdp(struct i915_address_space *vm)
+static void free_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd)
{
- struct i915_page_directory_pointer *pdp;
- int ret = -ENOMEM;
-
- GEM_BUG_ON(!i915_vm_is_4lvl(vm));
-
- pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
- if (!pdp)
- return ERR_PTR(-ENOMEM);
-
- ret = __pdp_init(vm, pdp);
- if (ret)
- goto fail_bitmap;
-
- ret = setup_px(vm, pdp);
- if (ret)
- goto fail_page_m;
-
- return pdp;
+ if (likely(pd_has_phys_page(pd)))
+ cleanup_px(vm, pd);
-fail_page_m:
- __pdp_fini(pdp);
-fail_bitmap:
- kfree(pdp);
-
- return ERR_PTR(ret);
+ kfree(pd);
}
-static void free_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static void init_pd_with_page(struct i915_address_space *vm,
+ struct i915_page_directory * const pd,
+ struct i915_page_table *pt)
{
- __pdp_fini(pdp);
-
- if (!i915_vm_is_4lvl(vm))
- return;
-
- cleanup_px(vm, pdp);
- kfree(pdp);
+ fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
+ memset_p(pd->entry, pt, 512);
}
-static void gen8_initialize_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static void init_pd(struct i915_address_space *vm,
+ struct i915_page_directory * const pd,
+ struct i915_page_directory * const to)
{
- gen8_ppgtt_pdpe_t scratch_pdpe;
-
- scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
-
- fill_px(vm, pdp, scratch_pdpe);
-}
+ GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
-static void gen8_initialize_pml4(struct i915_address_space *vm,
- struct i915_pml4 *pml4)
-{
- fill_px(vm, pml4,
- gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
+ fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
+ memset_p(pd->entry, to, 512);
}
/*
@@ -793,7 +758,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
{
ppgtt->pd_dirty_engines = ALL_ENGINES;
}
@@ -808,17 +773,12 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
unsigned int num_entries = gen8_pte_count(start, length);
gen8_pte_t *vaddr;
- GEM_BUG_ON(num_entries > pt->used_ptes);
-
- pt->used_ptes -= num_entries;
- if (!pt->used_ptes)
- return true;
-
vaddr = kmap_atomic_px(pt);
memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
- return false;
+ GEM_BUG_ON(num_entries > atomic_read(&pt->used));
+ return !atomic_sub_return(num_entries, &pt->used);
}
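Editor's note: gen8_ppgtt_clear_pt() now folds the per-table bookkeeping into a single atomic: atomic_sub_return() drops num_entries references and its zero result tells the caller the table became empty and may be freed. The C11 equivalent of that release-and-test step:

	#include <stdatomic.h>
	#include <stdbool.h>

	/*
	 * Drop n references; report whether we released the last one.
	 * atomic_fetch_sub() returns the old value, so old == n means the
	 * counter just hit zero -- the same test as !atomic_sub_return(n, v).
	 */
	static bool release_n(atomic_int *used, int n)
	{
		return atomic_fetch_sub(used, n) == n;
	}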
static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
@@ -828,8 +788,6 @@ static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
{
gen8_pde_t *vaddr;
- pd->page_table[pde] = pt;
-
vaddr = kmap_atomic_px(pd);
vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
kunmap_atomic(vaddr);
@@ -843,30 +801,37 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
u32 pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
+ bool free = false;
+
GEM_BUG_ON(pt == vm->scratch_pt);
if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
continue;
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- GEM_BUG_ON(!pd->used_pdes);
- pd->used_pdes--;
+ spin_lock(&pd->lock);
+ if (!atomic_read(&pt->used)) {
+ gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
+ pd->entry[pde] = vm->scratch_pt;
- free_pt(vm, pt);
+ GEM_BUG_ON(!atomic_read(&pd->used));
+ atomic_dec(&pd->used);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+ if (free)
+ free_pt(vm, pt);
}
- return !pd->used_pdes;
+ return !atomic_read(&pd->used);
}
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
struct i915_page_directory *pd,
unsigned int pdpe)
{
gen8_ppgtt_pdpe_t *vaddr;
- pdp->page_directory[pdpe] = pd;
- if (!i915_vm_is_4lvl(vm))
+ if (!pd_has_phys_page(pdp))
return;
vaddr = kmap_atomic_px(pdp);
@@ -878,42 +843,49 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
* Caller can use the return value to update higher-level entries
*/
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory * const pdp,
u64 start, u64 length)
{
struct i915_page_directory *pd;
unsigned int pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ bool free = false;
+
GEM_BUG_ON(pd == vm->scratch_pd);
if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
continue;
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
+ spin_lock(&pdp->lock);
+ if (!atomic_read(&pd->used)) {
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ pdp->entry[pdpe] = vm->scratch_pd;
- free_pd(vm, pd);
+ GEM_BUG_ON(!atomic_read(&pdp->used));
+ atomic_dec(&pdp->used);
+ free = true;
+ }
+ spin_unlock(&pdp->lock);
+ if (free)
+ free_pd(vm, pd);
}
- return !pdp->used_pdpes;
+ return !atomic_read(&pdp->used);
}
static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
+ gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
}
-static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
+static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
+ struct i915_page_directory *pdp,
unsigned int pml4e)
{
gen8_ppgtt_pml4e_t *vaddr;
- pml4->pdps[pml4e] = pdp;
-
vaddr = kmap_atomic_px(pml4);
vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_atomic(vaddr);
@@ -926,22 +898,29 @@ static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pml4 = ppgtt->pd;
+ struct i915_page_directory *pdp;
unsigned int pml4e;
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ bool free = false;
GEM_BUG_ON(pdp == vm->scratch_pdp);
if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
continue;
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-
- free_pdp(vm, pdp);
+ spin_lock(&pml4->lock);
+ if (!atomic_read(&pdp->used)) {
+ gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+ pml4->entry[pml4e] = vm->scratch_pdp;
+ free = true;
+ }
+ spin_unlock(&pml4->lock);
+ if (free)
+ free_pd(vm, pdp);
}
}
@@ -972,8 +951,8 @@ static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
}
static __always_inline bool
-gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
+gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
enum i915_cache_level cache_level,
@@ -985,8 +964,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
bool ret;
GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->page_directory[idx->pdpe];
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
+ pd = i915_pd_entry(pdp, idx->pdpe);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
do {
vaddr[idx->pte] = pte_encode | iter->dma;
@@ -1016,11 +995,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
}
GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->page_directory[idx->pdpe];
+ pd = pdp->entry[idx->pdpe];
}
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
}
} while (1);
kunmap_atomic(vaddr);
@@ -1033,18 +1012,18 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
+ gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
- struct i915_page_directory_pointer **pdps,
+ struct i915_page_directory *pml4,
struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
@@ -1055,8 +1034,9 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
do {
struct gen8_insert_pte idx = gen8_insert_pte(start);
- struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
- struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+ struct i915_page_directory *pdp =
+ i915_pdp_entry(pml4, idx.pml4e);
+ struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
unsigned int page_size;
bool maybe_64K = false;
gen8_pte_t encode = pte_encode;
@@ -1074,7 +1054,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
vaddr = kmap_atomic_px(pd);
} else {
- struct i915_page_table *pt = pd->page_table[idx.pde];
+ struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
index = idx.pte;
max = GEN8_PTES;
@@ -1149,7 +1129,8 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch_pte;
- vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd,
+ idx.pde));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
@@ -1167,17 +1148,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
+ struct i915_page_directory * const pml4 = ppgtt->pd;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+ while (gen8_ppgtt_insert_pte_entries(ppgtt,
+ i915_pdp_entry(pml4, idx.pml4e++),
&iter, &idx, cache_level,
flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
@@ -1192,8 +1174,8 @@ static void gen8_free_page_tables(struct i915_address_space *vm,
int i;
for (i = 0; i < I915_PDES; i++) {
- if (pd->page_table[i] != vm->scratch_pt)
- free_pt(vm, pd->page_table[i]);
+ if (pd->entry[i] != vm->scratch_pt)
+ free_pt(vm, pd->entry[i]);
}
}
@@ -1207,9 +1189,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
*/
if (vm->has_read_only &&
vm->i915->kernel_context &&
- vm->i915->kernel_context->ppgtt) {
- struct i915_address_space *clone =
- &vm->i915->kernel_context->ppgtt->vm;
+ vm->i915->kernel_context->vm) {
+ struct i915_address_space *clone = vm->i915->kernel_context->vm;
GEM_BUG_ON(!clone->has_read_only);
@@ -1243,7 +1224,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pdp(vm);
+ vm->scratch_pdp = alloc_pd(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -1251,9 +1232,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
gen8_initialize_pt(vm, vm->scratch_pt);
- gen8_initialize_pd(vm, vm->scratch_pd);
+ init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
if (i915_vm_is_4lvl(vm))
- gen8_initialize_pdp(vm, vm->scratch_pdp);
+ init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
return 0;
@@ -1267,7 +1248,7 @@ free_scratch_page:
return ret;
}
-static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
+static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
@@ -1275,7 +1256,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
int i;
if (i915_vm_is_4lvl(vm)) {
- const u64 daddr = px_dma(&ppgtt->pml4);
+ const u64 daddr = px_dma(ppgtt->pd);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
@@ -1305,55 +1286,58 @@ static void gen8_free_scratch(struct i915_address_space *vm)
return;
if (i915_vm_is_4lvl(vm))
- free_pdp(vm, vm->scratch_pdp);
+ free_pd(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+ struct i915_page_directory *pdp)
{
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
int i;
for (i = 0; i < pdpes; i++) {
- if (pdp->page_directory[i] == vm->scratch_pd)
+ if (pdp->entry[i] == vm->scratch_pd)
continue;
- gen8_free_page_tables(vm, pdp->page_directory[i]);
- free_pd(vm, pdp->page_directory[i]);
+ gen8_free_page_tables(vm, pdp->entry[i]);
+ free_pd(vm, pdp->entry[i]);
}
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
-static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
{
+ struct i915_page_directory * const pml4 = ppgtt->pd;
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
+ struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
+
+ if (pdp == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
}
- cleanup_px(&ppgtt->vm, &ppgtt->pml4);
+ free_pd(&ppgtt->vm, pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
gen8_free_scratch(vm);
}
@@ -1362,129 +1346,190 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
u64 start, u64 length)
{
- struct i915_page_table *pt;
+ struct i915_page_table *pt, *alloc = NULL;
u64 from = start;
unsigned int pde;
+ int ret = 0;
+ spin_lock(&pd->lock);
gen8_for_each_pde(pt, pd, start, length, pde) {
- int count = gen8_pte_count(start, length);
+ const int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
- pd->used_pdes++;
+ spin_unlock(&pd->lock);
- pt = alloc_pt(vm);
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
if (IS_ERR(pt)) {
- pd->used_pdes--;
+ ret = PTR_ERR(pt);
goto unwind;
}
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- GEM_BUG_ON(pd->used_pdes > I915_PDES);
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
+ gen8_ppgtt_set_pde(vm, pd, pt, pde);
+ pd->entry[pde] = pt;
+ atomic_inc(&pd->used);
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
+ }
}
- pt->used_ptes += count;
+ atomic_add(count, &pt->used);
}
- return 0;
+ spin_unlock(&pd->lock);
+ goto out;
unwind:
gen8_ppgtt_clear_pd(vm, pd, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ return ret;
}
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory *pdp,
u64 start, u64 length)
{
- struct i915_page_directory *pd;
+ struct i915_page_directory *pd, *alloc = NULL;
u64 from = start;
unsigned int pdpe;
- int ret;
+ int ret = 0;
+ spin_lock(&pdp->lock);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (pd == vm->scratch_pd) {
- pdp->used_pdpes++;
+ spin_unlock(&pdp->lock);
- pd = alloc_pd(vm);
+ pd = fetch_and_zero(&alloc);
+ if (!pd)
+ pd = alloc_pd(vm);
if (IS_ERR(pd)) {
- pdp->used_pdpes--;
+ ret = PTR_ERR(pd);
goto unwind;
}
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
+ init_pd_with_page(vm, pd, vm->scratch_pt);
+
+ spin_lock(&pdp->lock);
+ if (pdp->entry[pdpe] == vm->scratch_pd) {
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+ pdp->entry[pdpe] = pd;
+ atomic_inc(&pdp->used);
+ } else {
+ alloc = pd;
+ pd = pdp->entry[pdpe];
+ }
}
+ atomic_inc(&pd->used);
+ spin_unlock(&pdp->lock);
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
if (unlikely(ret))
goto unwind_pd;
- }
- return 0;
+ spin_lock(&pdp->lock);
+ atomic_dec(&pd->used);
+ }
+ spin_unlock(&pdp->lock);
+ goto out;
unwind_pd:
- if (!pd->used_pdes) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
+ spin_lock(&pdp->lock);
+ if (atomic_dec_and_test(&pd->used)) {
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ GEM_BUG_ON(!atomic_read(&pdp->used));
+ atomic_dec(&pdp->used);
free_pd(vm, pd);
}
+ spin_unlock(&pdp->lock);
unwind:
gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
return gen8_ppgtt_alloc_pdp(vm,
- &i915_vm_to_ppgtt(vm)->pdp, start, length);
+ i915_vm_to_ppgtt(vm)->pd, start, length);
}
static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pml4 = ppgtt->pd;
+ struct i915_page_directory *pdp, *alloc = NULL;
u64 from = start;
+ int ret = 0;
u32 pml4e;
- int ret;
+ spin_lock(&pml4->lock);
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == vm->scratch_pdp) {
- pdp = alloc_pdp(vm);
- if (IS_ERR(pdp))
+ if (pdp == vm->scratch_pdp) {
+ spin_unlock(&pml4->lock);
+
+ pdp = fetch_and_zero(&alloc);
+ if (!pdp)
+ pdp = alloc_pd(vm);
+ if (IS_ERR(pdp)) {
+ ret = PTR_ERR(pdp);
goto unwind;
+ }
+
+ init_pd(vm, pdp, vm->scratch_pd);
- gen8_initialize_pdp(vm, pdp);
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ spin_lock(&pml4->lock);
+ if (pml4->entry[pml4e] == vm->scratch_pdp) {
+ gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ pml4->entry[pml4e] = pdp;
+ } else {
+ alloc = pdp;
+ pdp = pml4->entry[pml4e];
+ }
}
+ atomic_inc(&pdp->used);
+ spin_unlock(&pml4->lock);
ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
if (unlikely(ret))
goto unwind_pdp;
- }
- return 0;
+ spin_lock(&pml4->lock);
+ atomic_dec(&pdp->used);
+ }
+ spin_unlock(&pml4->lock);
+ goto out;
unwind_pdp:
- if (!pdp->used_pdpes) {
+ spin_lock(&pml4->lock);
+ if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
+ spin_unlock(&pml4->lock);
unwind:
gen8_ppgtt_clear_4lvl(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
-static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
+ struct i915_page_directory *pdp = ppgtt->pd;
struct i915_page_directory *pd;
u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
@@ -1495,29 +1540,29 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
if (IS_ERR(pd))
goto unwind;
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
+ init_pd_with_page(vm, pd, vm->scratch_pt);
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+
+ atomic_inc(&pdp->used);
}
- pdp->used_pdpes++; /* never remove */
+ atomic_inc(&pdp->used); /* never remove */
+
return 0;
unwind:
start -= from;
gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
free_pd(vm, pd);
}
- pdp->used_pdpes = 0;
+ atomic_set(&pdp->used, 0);
return -ENOMEM;
}
static void ppgtt_init(struct drm_i915_private *i915,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
- kref_init(&ppgtt->ref);
-
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
@@ -1537,9 +1582,9 @@ static void ppgtt_init(struct drm_i915_private *i915,
* space.
*
*/
-static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
@@ -1566,27 +1611,34 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
+ ppgtt->pd = __alloc_pd();
+ if (!ppgtt->pd) {
+ err = -ENOMEM;
+ goto err_free_scratch;
+ }
+
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ err = setup_px(&ppgtt->vm, ppgtt->pd);
if (err)
- goto err_scratch;
+ goto err_free_pdp;
- gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
+ init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
- if (err)
- goto err_scratch;
+ /*
+		 * We don't need to set up dma for the top level pdp, only
+		 * for its entries. So point the entries to scratch.
+ */
+ memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
+ GEN8_3LVL_PDPES);
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err) {
- __pdp_fini(&ppgtt->pdp);
- goto err_scratch;
- }
+ if (err)
+ goto err_free_pdp;
}
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1601,7 +1653,9 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_scratch:
+err_free_pdp:
+ free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_scratch:
gen8_free_scratch(&ppgtt->vm);
err_free:
kfree(ppgtt);
@@ -1609,7 +1663,7 @@ err_free:
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
@@ -1638,8 +1692,9 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
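Editor's note: RING_MODE_GEN7 is one of the "masked" registers written above. The top 16 bits of the value select which of the low 16 bits the write may change, so individual bits can be set or cleared without a read-modify-write cycle; _MASKED_BIT_ENABLE(b) expands to roughly (b << 16) | b. As a sketch:

	#include <stdint.h>

	/* High half = write-enable mask, low half = new bit values. */
	static inline uint32_t masked_bit_enable(uint32_t bits)
	{
		return (bits << 16) | bits;	/* unmask the bits and set them */
	}

	static inline uint32_t masked_bit_disable(uint32_t bits)
	{
		return bits << 16;		/* unmask the bits and clear them */
	}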
@@ -1665,15 +1720,16 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch_pte;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
@@ -1681,9 +1737,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
num_entries -= count;
- GEM_BUG_ON(count > pt->used_ptes);
- pt->used_ptes -= count;
- if (!pt->used_ptes)
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
ppgtt->scan_for_unused_pt = true;
/*
@@ -1706,7 +1761,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
@@ -1714,9 +1770,9 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
- GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+ GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1732,7 +1788,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
if (++act_pte == GEN6_PTES) {
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
@@ -1744,50 +1800,72 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct i915_page_table *pt;
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt, *alloc = NULL;
+ intel_wakeref_t wakeref;
u64 from = start;
unsigned int pde;
bool flush = false;
+ int ret = 0;
- gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
if (pt == vm->scratch_pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ spin_unlock(&pd->lock);
+
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
goto unwind_out;
+ }
gen6_initialize_pt(vm, pt);
- ppgtt->base.pd.page_table[pde] = pt;
- if (i915_vma_is_bound(ppgtt->vma,
- I915_VMA_GLOBAL_BIND)) {
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
+ pd->entry[pde] = pt;
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
}
-
- GEM_BUG_ON(pt->used_ptes);
}
- pt->used_ptes += count;
+ atomic_add(count, &pt->used);
}
+ spin_unlock(&pd->lock);
if (flush) {
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+ gen6_ggtt_invalidate(vm->i915);
}
- return 0;
+ goto out;
unwind_out:
gen6_ppgtt_clear_range(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
}
-static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *unused;
u32 pde;
int ret;
@@ -1807,8 +1885,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
}
gen6_initialize_pt(vm, vm->scratch_pt);
- gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
- ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
+
+ gen6_for_all_pdes(unused, pd, pde)
+ pd->entry[pde] = vm->scratch_pt;
return 0;
}
@@ -1819,24 +1898,77 @@ static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
u32 pde;
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_for_all_pdes(pt, pd, pde)
if (pt != ppgtt->base.vm.scratch_pt)
free_pt(&ppgtt->base.vm, pt);
}
+struct gen6_ppgtt_cleanup_work {
+ struct work_struct base;
+ struct i915_vma *vma;
+};
+
+static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
+{
+ struct gen6_ppgtt_cleanup_work *work =
+ container_of(wrk, typeof(*work), base);
+	/* Side note, vma->vm is the GGTT, not the ppgtt we just destroyed! */
+ struct drm_i915_private *i915 = work->vma->vm->i915;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_destroy(work->vma);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kfree(work);
+}
+
+static int nop_set_pages(struct i915_vma *vma)
+{
+ return -ENODEV;
+}
+
+static void nop_clear_pages(struct i915_vma *vma)
+{
+}
+
+static int nop_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ return -ENODEV;
+}
+
+static void nop_unbind(struct i915_vma *vma)
+{
+}
+
+static const struct i915_vma_ops nop_vma_ops = {
+ .set_pages = nop_set_pages,
+ .clear_pages = nop_clear_pages,
+ .bind_vma = nop_bind,
+ .unbind_vma = nop_unbind,
+};
+
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
- i915_vma_destroy(ppgtt->vma);
+ /* FIXME remove the struct_mutex to bring the locking under control */
+ INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
+ work->vma = ppgtt->vma;
+ work->vma->ops = &nop_vma_ops;
+ schedule_work(&work->base);
gen6_ppgtt_free_pd(ppgtt);
gen6_ppgtt_free_scratch(vm);
+ kfree(ppgtt->base.pd);
}
static int pd_vma_set_pages(struct i915_vma *vma)
@@ -1857,15 +1989,15 @@ static int pd_vma_bind(struct i915_vma *vma,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
struct i915_page_table *pt;
unsigned int pde;
- ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
mark_tlbs_dirty(&ppgtt->base);
@@ -1876,7 +2008,8 @@ static int pd_vma_bind(struct i915_vma *vma,
static void pd_vma_unbind(struct i915_vma *vma)
{
- struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
struct i915_page_table *pt;
unsigned int pde;
@@ -1885,12 +2018,12 @@ static void pd_vma_unbind(struct i915_vma *vma)
return;
/* Free all no longer used page tables */
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
- if (pt->used_ptes || pt == scratch_pt)
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (atomic_read(&pt->used) || pt == scratch_pt)
continue;
free_pt(&ppgtt->base.vm, pt);
- ppgtt->base.pd.page_table[pde] = scratch_pt;
+ pd->entry[pde] = scratch_pt;
}
ppgtt->scan_for_unused_pt = false;
@@ -1903,7 +2036,7 @@ static const struct i915_vma_ops pd_vma_ops = {
.unbind_vma = pd_vma_unbind,
};
-static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
struct drm_i915_private *i915 = ppgtt->base.vm.i915;
struct i915_ggtt *ggtt = &i915->ggtt;
@@ -1929,6 +2062,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
+ INIT_LIST_HEAD(&vma->closed_link);
mutex_lock(&vma->vm->mutex);
list_add(&vma->vm_link, &vma->vm->unbound_list);
@@ -1937,9 +2071,9 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
return vma;
}
-int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
GEM_BUG_ON(ppgtt->base.vm.closed);
@@ -1971,9 +2105,9 @@ unpin:
return err;
}
-void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
GEM_BUG_ON(!ppgtt->pin_count);
if (--ppgtt->pin_count)
@@ -1982,9 +2116,9 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
-void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
if (!ppgtt->pin_count)
return;
@@ -1993,10 +2127,10 @@ void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
-static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
- struct gen6_hw_ppgtt *ppgtt;
+ struct gen6_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
@@ -2012,9 +2146,21 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+ ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
+ if (!ppgtt->work) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ppgtt->base.pd = __alloc_pd();
+ if (!ppgtt->base.pd) {
+ err = -ENOMEM;
+ goto err_work;
+ }
+
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_pd;
ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
if (IS_ERR(ppgtt->vma)) {
@@ -2026,6 +2172,10 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
err_scratch:
gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_pd:
+ kfree(ppgtt->base.pd);
+err_work:
+ kfree(ppgtt->work);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
@@ -2077,8 +2227,8 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
-static struct i915_hw_ppgtt *
-__hw_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *
+__ppgtt_create(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) < 8)
return gen6_ppgtt_create(i915);
@@ -2086,12 +2236,12 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
return gen8_ppgtt_create(i915);
}
-struct i915_hw_ppgtt *
+struct i915_ppgtt *
i915_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
- ppgtt = __hw_ppgtt_create(i915);
+ ppgtt = __ppgtt_create(i915);
if (IS_ERR(ppgtt))
return ppgtt;
@@ -2117,21 +2267,23 @@ static void ppgtt_destroy_vma(struct i915_address_space *vm)
}
}
-void i915_ppgtt_release(struct kref *kref)
+void i915_vm_release(struct kref *kref)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(kref, struct i915_hw_ppgtt, ref);
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
- trace_i915_ppgtt_release(&ppgtt->vm);
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
- ppgtt_destroy_vma(&ppgtt->vm);
+ ppgtt_destroy_vma(vm);
- GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
- GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
- ppgtt->vm.cleanup(&ppgtt->vm);
- i915_address_space_fini(&ppgtt->vm);
- kfree(ppgtt);
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
}
/* Certain Gen5 chipsets require idling the GPU before
@@ -2145,69 +2297,6 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 fault;
-
- for_each_engine(engine, dev_priv, id) {
- fault = I915_READ(RING_FAULT_REG(engine));
- if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
- }
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
- u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
-{
- /* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_check_faults(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_check_faults(dev_priv);
- else
- return;
-
- i915_clear_error_registers(dev_priv);
-}
-
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2526,7 +2615,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -2546,7 +2635,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
@@ -2564,7 +2653,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
+ struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
ret = appgtt->vm.allocate_va_range(&appgtt->vm,
@@ -2581,7 +2670,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
vma->vm->insert_entries(vma->vm, vma,
cache_level, pte_flags);
}
@@ -2598,7 +2687,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2660,10 +2749,10 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
*end -= I915_GTT_PAGE_SIZE;
}
-int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
+static int init_aliasing_ppgtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
ppgtt = i915_ppgtt_create(i915);
@@ -2696,25 +2785,51 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
return 0;
err_ppgtt:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
return err;
}
-void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
+static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
if (!ppgtt)
return;
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 size;
+ int ret;
+
+ if (!USES_GUC(ggtt->vm.i915))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
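
For scale, a hedged back-of-envelope on the reserved range, assuming the GUC_GGTT_TOP value of 0xFEE00000 defined elsewhere in the driver and a full 4 GiB GGTT:

        size = ggtt->vm.total - GUC_GGTT_TOP
             = 0x100000000 - 0xFEE00000
             = 0x01200000 bytes = 18 MiB

This is the "several MB" hole described in the comment added to i915_gem_init_ggtt() below. Reserving the node keeps vm.total intact for all other users, unlike the old approach of trimming the GGTT to GUC_GGTT_TOP (removed further down in this patch).
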
+
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
/* Let GEM manage all of the aperture.
@@ -2738,7 +2853,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* why.
*/
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_guc_reserved_gtt_size(&dev_priv->guc));
+ intel_wopcm_guc_size(&dev_priv->wopcm));
ret = intel_vgt_balloon(dev_priv);
if (ret)
@@ -2752,6 +2867,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible to the GuC. Reserve this range
+ * within the GGTT as it can comfortably hold the GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err_reserve;
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2764,14 +2888,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
- ret = i915_gem_init_aliasing_ppgtt(dev_priv);
+ ret = init_aliasing_ppgtt(dev_priv);
if (ret)
- goto err;
+ goto err_appgtt;
}
return 0;
-err:
+err_appgtt:
+ ggtt_release_guc_top(ggtt);
+err_reserve:
drm_mm_remove_node(&ggtt->error_capture);
return ret;
}
@@ -2789,7 +2915,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_fini_aliasing_ppgtt(dev_priv);
+ fini_aliasing_ppgtt(dev_priv);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
@@ -2797,6 +2923,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
+ ggtt_release_guc_top(ggtt);
+
if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
i915_address_space_fini(&ggtt->vm);
@@ -3280,7 +3408,9 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
ggtt->vm.cleanup = gen6_gmch_remove;
@@ -3369,17 +3499,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
- * This is easier than doing range restriction on the fly, as we
- * currently don't have any bits spare to pass in this upper
- * restriction!
- */
- if (USES_GUC(dev_priv)) {
- ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
- ggtt->mappable_end =
- min_t(u64, ggtt->mappable_end, ggtt->vm.total);
- }
-
if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
@@ -3444,6 +3563,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+ i915_ggtt_init_fences(ggtt);
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3518,8 +3639,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_UPDATE));
- if (obj)
+ if (obj) {
+ i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ i915_gem_object_unlock(obj);
+ }
lock:
mutex_lock(&ggtt->vm.mutex);
@@ -3608,6 +3732,89 @@ err_st_alloc:
return ERR_PTR(ret);
}
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but we do need to initialize
+ * the entries so the sg list can be traversed.
+ * All we need are the DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += stride - width;
+ }
+
+ return sg;
+}
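
A hedged worked example of the row walk above: for a plane with width = 4, height = 2, stride = 6 and offset = 0 (all in GTT pages), the loop emits

        row 0: source pages 0, 1, 2, 3
        skip : stride - width = 6 - 4 = 2 pages
        row 1: source pages 6, 7, 8, 9

Because i915_gem_object_get_dma_address_len() reports how long the contiguous run is at each offset, a physically contiguous row collapses into a single sg entry; i915_sg_trim() in the caller then drops the unused tail of the table.
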
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
+ sg = remap_pages(obj, rem_info->plane[i].offset,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].stride, st, sg);
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
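
intel_remapped_info_size() supplies the sg-table size above; a hedged sketch of what it is assumed to compute, namely the total number of GTT pages in the view, i.e. the sum of width * height over both planes:

static unsigned int
example_remapped_info_size(const struct intel_remapped_info *rem_info)
{
        unsigned int size = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
                size += rem_info->plane[i].width * rem_info->plane[i].height;

        return size;
}
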
+
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
@@ -3686,6 +3893,11 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
break;
+ case I915_GGTT_VIEW_REMAPPED:
+ vma->pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f597f35b109b..812717ccc69b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,8 +38,10 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
#include "i915_request.h"
-#include "i915_reset.h"
+#include "i915_scatterlist.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
@@ -60,7 +62,7 @@
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_file_private;
-struct drm_i915_fence_reg;
+struct drm_i915_gem_object;
struct i915_vma;
typedef u32 gen6_pte_t;
@@ -161,13 +163,21 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-struct sg_table;
+#define for_each_sgt_dma(__dmap, __iter, __sgt) \
+ __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
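A hedged usage sketch for the wrapper macro above, assuming the iterator type is struct sgt_iter from the newly included i915_scatterlist.h: the macro yields one DMA address per I915_GTT_PAGE_SIZE step, which is the natural unit for writing PTEs.

static inline void example_for_each_pte_dma(struct sg_table *pages)
{
        struct sgt_iter iter;
        dma_addr_t addr;

        for_each_sgt_dma(addr, iter, pages) {
                /* program one page-sized PTE with 'addr' here */
                (void)addr;
        }
}
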
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[2];
+ unsigned int unused_mbz;
+} __packed;
struct intel_rotation_info {
- struct intel_rotation_plane_info {
- /* tiles */
- unsigned int width, height, stride, offset;
- } plane[2];
+ struct intel_remapped_plane_info plane[2];
} __packed;
struct intel_partial_info {
@@ -179,12 +189,20 @@ enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+ /* Check that rotation/remapped shares offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
@@ -193,6 +211,7 @@ static inline void assert_i915_gem_gtt_types(void)
case I915_GGTT_VIEW_NORMAL:
case I915_GGTT_VIEW_PARTIAL:
case I915_GGTT_VIEW_ROTATED:
+ case I915_GGTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
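
The switch above leans on each view type having a unique, non-zero sizeof. A hedged tally from the __packed layouts and the BUILD_BUG_ON()s earlier in this function:

        intel_partial_info  = sizeof(u64) + sizeof(unsigned int) = 12 bytes
        intel_rotation_info = 8 * sizeof(unsigned int)           = 32 bytes
        intel_remapped_info = 9 * sizeof(unsigned int)           = 36 bytes

All distinct, and none is zero, so none collides with I915_GGTT_VIEW_NORMAL = 0.
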
@@ -204,6 +223,7 @@ struct i915_ggtt_view {
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
};
};
@@ -228,25 +248,14 @@ struct i915_page_dma {
struct i915_page_table {
struct i915_page_dma base;
- unsigned int used_ptes;
+ atomic_t used;
};
struct i915_page_directory {
struct i915_page_dma base;
-
- struct i915_page_table *page_table[I915_PDES]; /* PDEs */
- unsigned int used_pdes;
-};
-
-struct i915_page_directory_pointer {
- struct i915_page_dma base;
- struct i915_page_directory **page_directory;
- unsigned int used_pdpes;
-};
-
-struct i915_pml4 {
- struct i915_page_dma base;
- struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
+ atomic_t used;
+ spinlock_t lock;
+ void *entry[512];
};
struct i915_vma_ops {
@@ -270,6 +279,8 @@ struct pagestash {
};
struct i915_address_space {
+ struct kref ref;
+
struct drm_mm mm;
struct drm_i915_private *i915;
struct device *dma;
@@ -296,7 +307,7 @@ struct i915_address_space {
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
- struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
+ struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
/**
* List of vma currently bound.
@@ -383,38 +394,46 @@ struct i915_ggtt {
u32 pin_bias;
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /** List of all objects in gtt_space currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
};
-struct i915_hw_ppgtt {
+struct i915_ppgtt {
struct i915_address_space vm;
- struct kref ref;
intel_engine_mask_t pd_dirty_engines;
- union {
- struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
- struct i915_page_directory_pointer pdp; /* GEN8+ */
- struct i915_page_directory pd; /* GEN6-7 */
- };
-
- u32 user_handle;
+ struct i915_page_directory *pd;
};
-struct gen6_hw_ppgtt {
- struct i915_hw_ppgtt base;
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
unsigned int pin_count;
bool scan_for_unused_pt;
+
+ struct gen6_ppgtt_cleanup_work *work;
};
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
- BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
return __to_gen6_ppgtt(base);
}
@@ -429,7 +448,7 @@ static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
#define gen6_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
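
A hedged walk-through of the bookkeeping above, assuming GEN6_PDE_SHIFT is 22 so one PDE spans 4 MiB: with start = 0x500000 and length = 0x600000,

        pass 1: temp = ALIGN(0x500001, 0x400000) = 0x800000,
                covers min(0x800000 - 0x500000, length) = 0x300000
        pass 2: start = 0x800000, length = 0x300000,
                covers the remaining 0x300000

i.e. each pass handles the portion of the range that falls inside the current PDE, then advances start and shrinks length before ++iter moves on.
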
@@ -437,7 +456,7 @@ static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
#define gen6_for_all_pdes(pt, pd, iter) \
for (iter = 0; \
iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
++iter)
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
@@ -496,6 +515,27 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
return GEN8_3LVL_PDPES;
}
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pdp_entry(const struct i915_page_directory * const pml4,
+ const unsigned short n)
+{
+ return pml4->entry[n];
+}
+
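With every level now the same struct i915_page_directory holding void *entry[512], the typed accessors above are fixed-slot lookups that differ only in their return type. A hedged sketch composing them into a full gen8 walk from PML4 down to a page table:

static inline struct i915_page_table *
example_gen8_walk(struct i915_page_directory *pml4,
                  unsigned int pml4e, unsigned int pdpe, unsigned int pde)
{
        struct i915_page_directory *pdp = i915_pdp_entry(pml4, pml4e);
        struct i915_page_directory *pd = i915_pd_entry(pdp, pdpe);

        return i915_pt_entry(pd, pde);
}
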
/* Equivalent to the gen6 version: for each pde, iterate over every pde
 * between start and start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory.
@@ -503,7 +543,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -511,7 +551,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \
length > 0 && iter < i915_pdpes_per_pdp(vm) && \
- (pd = (pdp)->page_directory[iter], true); \
+ (pd = i915_pd_entry(pdp, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -519,7 +559,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
- (pdp = (pml4)->pdps[iter], true); \
+ (pdp = i915_pdp_entry(pml4, iter), true); \
({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -550,18 +590,30 @@ static inline u64 gen8_pte_count(u64 address, u64 length)
}
static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
- return px_dma(ppgtt->pdp.page_directory[n]);
+ struct i915_page_directory *pd;
+
+ pd = i915_pdp_entry(ppgtt->pd, n);
+ return px_dma(pd);
}
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, vm);
}
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
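The BUILD_BUG_ON()s in both converters pin 'vm' as the first member, so container_of() degenerates to a pointer cast and the conversion round-trips exactly. A hedged illustration:

static inline void example_vm_roundtrip(struct i915_ppgtt *ppgtt)
{
        struct i915_address_space *vm = &ppgtt->vm;

        /* offsetof(struct i915_ppgtt, vm) == 0, so this always holds: */
        GEM_BUG_ON(i915_vm_to_ppgtt(vm) != ppgtt);
}
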
#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)
@@ -593,9 +645,6 @@ const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value);
void intel_ppat_put(const struct intel_ppat_entry *entry);
-int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
-void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
-
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
@@ -606,26 +655,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-void i915_ppgtt_release(struct kref *kref);
+struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
{
- kref_get(&ppgtt->ref);
- return ppgtt;
+ kref_get(&vm->ref);
+ return vm;
}
-static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
{
- if (ppgtt)
- kref_put(&ppgtt->ref, i915_ppgtt_release);
+ kref_put(&vm->ref, i915_vm_release);
}
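
Lifetime management now hangs off the address space itself rather than a ppgtt-only kref; the final put lands in i915_vm_release() (see the .c hunk above), which runs vm->cleanup() and frees the containing structure. A hedged usage sketch:

static void example_use_vm(struct i915_ppgtt *ppgtt)
{
        struct i915_address_space *vm = i915_vm_get(&ppgtt->vm);

        /* ... vm (and thus the ppgtt) stays alive across this region ... */

        i915_vm_put(vm);        /* the last put ends in i915_vm_release() */
}
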
-int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
deleted file mode 100644
index ac6a5ab84586..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-#include "i915_globals.h"
-
-static struct i915_global_object {
- struct i915_global base;
- struct kmem_cache *slab_objects;
-} global;
-
-struct drm_i915_gem_object *i915_gem_object_alloc(void)
-{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- return kmem_cache_free(global.slab_objects, obj);
-}
-
-/**
- * Mark up the object's coherency levels for a given cache_level
- * @obj: #drm_i915_gem_object
- * @cache_level: cache level
- */
-void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
- unsigned int cache_level)
-{
- obj->cache_level = cache_level;
-
- if (cache_level != I915_CACHE_NONE)
- obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
- I915_BO_CACHE_COHERENT_FOR_WRITE);
- else if (HAS_LLC(to_i915(obj->base.dev)))
- obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
- else
- obj->cache_coherent = 0;
-
- obj->cache_dirty =
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
-static void i915_global_objects_shrink(void)
-{
- kmem_cache_shrink(global.slab_objects);
-}
-
-static void i915_global_objects_exit(void)
-{
- kmem_cache_destroy(global.slab_objects);
-}
-
-static struct i915_global_object global = { {
- .shrink = i915_global_objects_shrink,
- .exit = i915_global_objects_exit,
-} };
-
-int __init i915_global_objects_init(void)
-{
- global.slab_objects =
- KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!global.slab_objects)
- return -ENOMEM;
-
- i915_global_register(&global.base);
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
deleted file mode 100644
index ca93a40c0c87..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEM_OBJECT_H__
-#define __I915_GEM_OBJECT_H__
-
-#include <linux/reservation.h>
-
-#include <drm/drm_vma_manager.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_file.h>
-#include <drm/drm_device.h>
-
-#include <drm/i915_drm.h>
-
-#include "i915_request.h"
-#include "i915_selftest.h"
-
-struct drm_i915_gem_object;
-
-/*
- * struct i915_lut_handle tracks the fast lookups from handle to vma used
- * for execbuf. Although we use a radixtree for that mapping, in order to
- * remove them as the object or context is closed, we need a secondary list
- * and a translation entry (i915_lut_handle).
- */
-struct i915_lut_handle {
- struct list_head obj_link;
- struct list_head ctx_link;
- struct i915_gem_context *ctx;
- u32 handle;
-};
-
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
-
- int (*pwrite)(struct drm_i915_gem_object *,
- const struct drm_i915_gem_pwrite *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- struct {
- /**
- * @vma.lock: protect the list/tree of vmas
- */
- spinlock_t lock;
-
- /**
- * @vma.list: List of VMAs backed by this object
- *
- * The VMA on this list are ordered by type, all GGTT vma are
- * placed at the head and all ppGTT vma are placed at the tail.
- * The different types of GGTT vma are unordered between
- * themselves, use the @vma.tree (which has a defined order
- * between all VMA) to quickly find an exact match.
- */
- struct list_head list;
-
- /**
- * @vma.tree: Ordered tree of VMAs backed by this object
- *
- * All VMA created for this object are placed in the @vma.tree
- * for fast retrieval via a binary search in
- * i915_vma_instance(). They are also added to @vma.list for
- * easy iteration.
- */
- struct rb_root tree;
- } vma;
-
- /**
- * @lut_list: List of vma lookup entries in use for this object.
- *
- * If this object is closed, we need to remove all of its VMA from
- * the fast lookup index in associated contexts; @lut_list provides
- * this translation from object to context->handles_vma.
- */
- struct list_head lut_list;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- union {
- struct rcu_head rcu;
- struct llist_node freed;
- };
-
- /**
- * Whether the object is currently in the GGTT mmap.
- */
- unsigned int userfault_count;
- struct list_head userfault_link;
-
- struct list_head batch_pool_link;
- I915_SELFTEST_DECLARE(struct list_head st_link);
-
- unsigned long flags;
-
- /**
- * Have we taken a reference for the object for incomplete GPU
- * activity?
- */
-#define I915_BO_ACTIVE_REF 0
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned int cache_level:3;
- unsigned int cache_coherent:2;
-#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
-#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
- unsigned int cache_dirty:1;
-
- /**
- * @read_domains: Read memory domains.
- *
- * These monitor which caches contain read/write data related to the
- * object. When transitioning from one set of domains to another,
- * the driver is called to ensure that caches are suitably flushed and
- * invalidated.
- */
- u16 read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- u16 write_domain;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_active_request frontbuffer_write;
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int active_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
-
- struct {
- struct mutex lock; /* protects the pages and their use */
- atomic_t pages_pin_count;
-
- struct sg_table *pages;
- void *mapping;
-
- /* TODO: whack some of this into the error state */
- struct i915_page_sizes {
- /**
- * The sg mask of the pages sg_table. i.e the mask of
- * of the lengths for each sg entry.
- */
- unsigned int phys;
-
- /**
- * The gtt page sizes we are allowed to use given the
- * sg mask and the supported page sizes. This will
- * express the smallest unit we can use for the whole
- * object, as well as the larger sizes we may be able
- * to use opportunistically.
- */
- unsigned int sg;
-
- /**
- * The actual gtt page size usage. Since we can have
- * multiple vma associated with this object we need to
- * prevent any trampling of state, hence a copy of this
- * struct also lives in each vma, therefore the gtt
- * value here should only be read/write through the vma.
- */
- unsigned int gtt;
- } page_sizes;
-
- I915_SELFTEST_DECLARE(unsigned int page_mask);
-
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
-
- /**
- * Element within i915->mm.unbound_list or i915->mm.bound_list,
- * locked by i915->mm.obj_lock.
- */
- struct list_head link;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * This is set if the object has been written to since the
- * pages were last acquired.
- */
- bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
- } mm;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct reservation_object *resv;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned int framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- union {
- struct i915_gem_userptr {
- uintptr_t ptr;
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- unsigned long scratch;
-
- void *gvt_info;
- };
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct reservation_object __builtin_resv;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-struct drm_i915_gem_object *i915_gem_object_alloc(void);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
-
-/**
- * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
- * @filp: DRM file private date
- * @handle: userspace handle
- *
- * Returns:
- *
- * A pointer to the object named by the handle if such exists on @filp, NULL
- * otherwise. This object is only valid whilst under the RCU read lock, and
- * note carefully the object may be in the process of being destroyed.
- */
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
-{
-#ifdef CONFIG_LOCKDEP
- WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
-#endif
- return idr_find(&file->object_idr, handle);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- struct drm_i915_gem_object *obj;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
- rcu_read_unlock();
-
- return obj;
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_get(&obj->base);
- return obj;
-}
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- __drm_gem_object_put(&obj->base);
-}
-
-static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
-{
- reservation_object_lock(obj->resv, NULL);
-}
-
-static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
-{
- reservation_object_unlock(obj->resv);
-}
-
-static inline void
-i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
-{
- obj->base.vma_node.readonly = true;
-}
-
-static inline bool
-i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
-{
- return obj->base.vma_node.readonly;
-}
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline bool
-i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
-}
-
-static inline bool
-i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
-}
-
-static inline bool
-i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return obj->active_count;
-}
-
-static inline bool
-i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
-{
- return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
-
-static inline bool
-i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
-{
- return READ_ONCE(obj->framebuffer_references);
-}
-
-static inline unsigned int
-i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline unsigned int
-i915_gem_tile_height(unsigned int tiling)
-{
- GEM_BUG_ON(!tiling);
- return tiling == I915_TILING_Y ? 32 : 8;
-}
-
-static inline unsigned int
-i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
-}
-
-static inline unsigned int
-i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
-{
- return (i915_gem_object_get_stride(obj) *
- i915_gem_object_get_tile_height(obj));
-}
-
-int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
- unsigned int tiling, unsigned int stride);
-
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
-void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
- unsigned int cache_level);
-void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-
-void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- bool needs_clflush);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 9440024c763f..4ee032072d4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -84,7 +84,7 @@ static int render_state_setup(struct intel_render_state *so,
u32 *d;
int ret;
- ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
+ ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
if (ret)
return ret;
@@ -166,7 +166,7 @@ static int render_state_setup(struct intel_render_state *so,
ret = 0;
out:
- i915_gem_obj_finish_shmem_access(so->obj);
+ i915_gem_object_finish_access(so->obj);
return ret;
err:
@@ -222,12 +222,14 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
+ i915_vma_lock(so.vma);
err = i915_vma_move_to_active(so.vma, rq, 0);
+ i915_vma_unlock(so.vma);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
i915_vma_close(so.vma);
err_obj:
- __i915_gem_object_release_unless_active(so.obj);
+ i915_gem_object_put(so.obj);
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h
deleted file mode 100644
index cca8bdc5b93e..000000000000
--- a/drivers/gpu/drm/i915/i915_gemfs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEMFS_H__
-#define __I915_GEMFS_H__
-
-struct drm_i915_private;
-
-int i915_gemfs_init(struct drm_i915_private *i915);
-
-void i915_gemfs_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 81e5c2ce336b..2d5fcba98841 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -8,8 +8,8 @@
#include <linux/workqueue.h>
#include "i915_active.h"
-#include "i915_gem_context.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f51ff683dd2e..b7e9fddef270 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -36,8 +36,15 @@
#include <drm/drm_print.h>
-#include "i915_gpu_error.h"
+#include "display/intel_atomic.h"
+#include "display/intel_overlay.h"
+
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_scatterlist.h"
+#include "intel_csr.h"
static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
@@ -1117,17 +1124,23 @@ static u32 i915_error_generate_code(struct i915_gpu_state *error,
static void gem_record_fences(struct i915_gpu_state *error)
{
struct drm_i915_private *dev_priv = error->i915;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int i;
if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read64(uncore,
+ FENCE_REG_GEN6_LO(i));
} else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read64(uncore,
+ FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ(FENCE_REG(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read(uncore, FENCE_REG(i));
}
error->nfence = i;
}
@@ -1143,7 +1156,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
else
- ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
+ ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1213,7 +1226,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (HAS_PPGTT(dev_priv)) {
int i;
- ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
+ ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
@@ -1263,7 +1276,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline.requests, link)
+ list_for_each_entry_from(request, &engine->active.requests, sched.link)
count++;
if (!count)
return;
@@ -1276,7 +1289,8 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline.requests, link) {
+ list_for_each_entry_from(request,
+ &engine->active.requests, sched.link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1419,7 +1433,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
+ ee->vm = ctx->vm ?: &ggtt->vm;
record_context(&ee->context, ctx);
@@ -1564,7 +1578,8 @@ static void capture_uc_state(struct i915_gpu_state *error)
/* Capture all registers which don't fit into another category. */
static void capture_reg_state(struct i915_gpu_state *error)
{
- struct drm_i915_private *dev_priv = error->i915;
+ struct drm_i915_private *i915 = error->i915;
+ struct intel_uncore *uncore = &i915->uncore;
int i;
/* General organization
@@ -1576,71 +1591,84 @@ static void capture_reg_state(struct i915_gpu_state *error)
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev_priv)) {
- error->gtier[0] = I915_READ(GTIER);
- error->ier = I915_READ(VLV_IER);
- error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
+ if (IS_VALLEYVIEW(i915)) {
+ error->gtier[0] = intel_uncore_read(uncore, GTIER);
+ error->ier = intel_uncore_read(uncore, VLV_IER);
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
- if (IS_GEN(dev_priv, 7))
- error->err_int = I915_READ(GEN7_ERR_INT);
+ if (IS_GEN(i915, 7))
+ error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(dev_priv) >= 8) {
- error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ if (INTEL_GEN(i915) >= 8) {
+ error->fault_data0 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN(dev_priv, 6)) {
- error->forcewake = I915_READ_FW(FORCEWAKE);
- error->gab_ctl = I915_READ(GAB_CTL);
- error->gfx_mode = I915_READ(GFX_MODE);
+ if (IS_GEN(i915, 6)) {
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+ error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+ error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_GEN(dev_priv) >= 7)
- error->forcewake = I915_READ_FW(FORCEWAKE_MT);
+ if (INTEL_GEN(i915) >= 7)
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
- if (INTEL_GEN(dev_priv) >= 6) {
- error->derrmr = I915_READ(DERRMR);
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
+ if (INTEL_GEN(i915) >= 6) {
+ error->derrmr = intel_uncore_read(uncore, DERRMR);
+ error->error = intel_uncore_read(uncore, ERROR_GEN6);
+ error->done_reg = intel_uncore_read(uncore, DONE_REG);
}
- if (INTEL_GEN(dev_priv) >= 5)
- error->ccid = I915_READ(CCID(RENDER_RING_BASE));
+ if (INTEL_GEN(i915) >= 5)
+ error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
- if (IS_GEN_RANGE(dev_priv, 6, 7)) {
- error->gam_ecochk = I915_READ(GAM_ECOCHK);
- error->gac_eco = I915_READ(GAC_ECO_BITS);
+ if (IS_GEN_RANGE(i915, 6, 7)) {
+ error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 11) {
- error->ier = I915_READ(GEN8_DE_MISC_IER);
- error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
- error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
- error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
- error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
- error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ if (INTEL_GEN(i915) >= 11) {
+ error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ error->gtier[0] =
+ intel_uncore_read(uncore,
+ GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] =
+ intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] =
+ intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] =
+ intel_uncore_read(uncore,
+ GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] =
+ intel_uncore_read(uncore,
+ GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] =
+ intel_uncore_read(uncore,
+ GEN11_GUNIT_CSME_INTR_ENABLE);
error->ngtier = 6;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- error->ier = I915_READ(GEN8_DE_MISC_IER);
+ } else if (INTEL_GEN(i915) >= 8) {
+ error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
- error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+ error->gtier[i] = intel_uncore_read(uncore,
+ GEN8_GT_IER(i));
error->ngtier = 4;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- error->ier = I915_READ(DEIER);
- error->gtier[0] = I915_READ(GTIER);
+ } else if (HAS_PCH_SPLIT(i915)) {
+ error->ier = intel_uncore_read(uncore, DEIER);
+ error->gtier[0] = intel_uncore_read(uncore, GTIER);
error->ngtier = 1;
- } else if (IS_GEN(dev_priv, 2)) {
- error->ier = I915_READ16(GEN2_IER);
- } else if (!IS_VALLEYVIEW(dev_priv)) {
- error->ier = I915_READ(GEN2_IER);
+ } else if (IS_GEN(i915, 2)) {
+ error->ier = intel_uncore_read16(uncore, GEN2_IER);
+ } else if (!IS_VALLEYVIEW(i915)) {
+ error->ier = intel_uncore_read(uncore, GEN2_IER);
}
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->eir = intel_uncore_read(uncore, EIR);
+ error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
static const char *
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5dc761e85d9d..2ecd0c6a1c94 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -13,8 +13,9 @@
#include <drm/drm_mm.h>
+#include "gt/intel_engine.h"
+
#include "intel_device_info.h"
-#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"
#include "i915_gem.h"
@@ -178,8 +179,6 @@ struct i915_gpu_state {
struct scatterlist *sgl, *fit;
};
-struct i915_gpu_restart;
-
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -240,8 +239,6 @@ struct i915_gpu_error {
wait_queue_head_t reset_queue;
struct srcu_struct reset_backoff_srcu;
-
- struct i915_gpu_restart *restart;
};
struct drm_i915_error_state_buf {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b92cfd69134b..b2e27b5b0df9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,10 +37,16 @@
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+#include "display/intel_fifo_underrun.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_lpe_audio.h"
+#include "display/intel_psr.h"
+
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include "intel_psr.h"
+#include "intel_pm.h"
/**
* DOC: interrupt handling
@@ -136,6 +142,12 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
+static const u32 hpd_mcc[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+};
+
static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -382,7 +394,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
- POSTING_READ_FW(GTIMR);
+ intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
@@ -584,7 +596,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
@@ -593,13 +605,13 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts_enabled) {
+ if (!dev_priv->guc.interrupts.enabled) {
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
dev_priv->pm_guc_events);
- dev_priv->guc.interrupts_enabled = true;
+ dev_priv->guc.interrupts.enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -607,10 +619,10 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts_enabled = false;
+ dev_priv->guc.interrupts.enabled = false;
gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
@@ -620,6 +632,42 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
gen9_reset_guc_interrupts(dev_priv);
}
+void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ gen11_reset_one_iir(i915, 0, GEN11_GUC);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK,
+ GEN11_GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
+ dev_priv->guc.interrupts.enabled = true;
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts.enabled = false;
+
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen11_reset_guc_interrupts(dev_priv);
+}
+
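A sketch of how the new gen11 helpers pair up around GuC use (demo_guc_irq_cycle() is hypothetical; the enable/disable calls are the ones added above):

static void demo_guc_irq_cycle(struct drm_i915_private *i915)
{
	/* unmask GUC2HOST events before starting GuC communication */
	gen11_enable_guc_interrupts(i915);

	/* ... GuC-to-host notifications arrive via the GEN11_GUC vector ... */

	/*
	 * Teardown masks and zeroes the enable register, flushes any
	 * in-flight handler with synchronize_irq(), then clears stale
	 * IIR state via gen11_reset_guc_interrupts().
	 */
	gen11_disable_guc_interrupts(i915);
}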
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -1191,20 +1239,23 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
spin_lock(&mchdev_lock);
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
new_delay = dev_priv->ips.cur_delay;
- I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = I915_READ(RCPREVBSYTUPAVG);
- busy_down = I915_READ(RCPREVBSYTDNAVG);
- max_avg = I915_READ(RCBMAXAVG);
- min_avg = I915_READ(RCBMINAVG);
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
@@ -1301,7 +1352,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
goto out;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
@@ -1367,7 +1418,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = 0;
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1889,6 +1940,12 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
intel_guc_to_host_event_handler(&dev_priv->guc);
}
+static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
+{
+ if (iir & GEN11_GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(&i915->guc);
+}
+
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -2136,7 +2193,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 iir, gt_iir, pm_iir;
@@ -2207,7 +2264,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2222,7 +2279,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 master_ctl, iir;
@@ -2288,7 +2345,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2447,7 +2504,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
+ const u32 *pins)
{
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -2461,7 +2519,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger,
- dig_hotplug_reg, hpd_icp,
+ dig_hotplug_reg, pins,
icp_ddi_port_hotplug_long_detect);
}
@@ -2473,7 +2531,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
- dig_hotplug_reg, hpd_icp,
+ dig_hotplug_reg, pins,
icp_tc_port_hotplug_long_detect);
}
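With the pin table now a parameter, icp_irq_handler() serves both the ICP and MCC layouts. As a simplified model (not the real intel_get_hpd_pins(), which also classifies long vs short pulses), the table maps trigger bits to HPD pins like so:

static u32 demo_pin_mask(u32 hotplug_trigger, const u32 pins[HPD_NUM_PINS])
{
	u32 pin_mask = 0;
	int pin;

	for (pin = 0; pin < HPD_NUM_PINS; pin++)
		if (hotplug_trigger & pins[pin])
			pin_mask |= BIT(pin);

	return pin_mask;
}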
@@ -2642,7 +2700,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -2694,7 +2752,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(SDEIER, sde_ier);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2904,8 +2962,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
+ icp_irq_handler(dev_priv, iir, hpd_mcc);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir, hpd_icp);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -2961,9 +3021,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
gen8_de_irq_handler(dev_priv, master_ctl);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
gen8_master_intr_enable(regs);
@@ -3011,6 +3071,9 @@ static void
gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
+ if (instance == OTHER_GUC_INSTANCE)
+ return gen11_guc_irq_handler(i915, iir);
+
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(i915, iir);
@@ -3159,13 +3222,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ) {
const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
- disable_rpm_wakeref_asserts(i915);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
*/
gen8_de_irq_handler(i915, disp_ctl);
- enable_rpm_wakeref_asserts(i915);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
@@ -3541,6 +3604,8 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
static void gen11_irq_reset(struct drm_device *dev)
@@ -4196,6 +4261,10 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_imr = ~dev_priv->pm_ier;
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
static void icp_irq_postinstall(struct drm_device *dev)
@@ -4268,8 +4337,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
- I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write16(uncore,
+ EMR,
+ ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4295,17 +4366,18 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+static void i8xx_error_irq_ack(struct drm_i915_private *i915,
u16 *eir, u16 *eir_stuck)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 emr;
- *eir = I915_READ16(EIR);
+ *eir = intel_uncore_read16(uncore, EIR);
if (*eir)
- I915_WRITE16(EIR, *eir);
+ intel_uncore_write16(uncore, EIR, *eir);
- *eir_stuck = I915_READ16(EIR);
+ *eir_stuck = intel_uncore_read16(uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -4319,9 +4391,9 @@ static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ16(EMR);
- I915_WRITE16(EMR, 0xffff);
- I915_WRITE16(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read16(uncore, EMR);
+ intel_uncore_write16(uncore, EMR, 0xffff);
+ intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -4380,14 +4452,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(GEN2_IIR);
+ iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4400,7 +4472,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(GEN2_IIR, iir);
+ intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
@@ -4411,7 +4483,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4485,7 +4557,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4524,7 +4596,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4633,7 +4705,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4674,7 +4746,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4703,7 +4775,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv))
+ if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
(On gen11+ GuC events are delivered through the dedicated GEN11_GUC vector above rather than the gen9 PM IIR path, hence the added INTEL_GEN() < 11 check.)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
new file mode 100644
index 000000000000..cb25dd213308
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_IRQ_H__
+#define __I915_IRQ_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+
+struct drm_i915_private;
+struct intel_crtc;
+
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_irq_fini(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
+
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 mask,
+ u32 bits);
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ilk_update_display_irq(dev_priv, bits, bits);
+}
+static inline void
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ilk_update_display_irq(dev_priv, bits, 0);
+}
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+}
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+}
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, bits);
+}
+static inline void
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, 0);
+}
+
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
+
+static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
+ u32 mask)
+{
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
+}
+
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return dev_priv->runtime_pm.irqs_enabled;
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
+void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
+void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
+
+#endif /* __I915_IRQ_H__ */
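As a small usage sketch for the new header, programming a PM interrupt mask through the sanitizer it exports (demo_write_pm_intrmsk() is hypothetical; GEN6_PMINTRMSK is the register this helper is paired with elsewhere in the driver):

static void demo_write_pm_intrmsk(struct drm_i915_private *i915, u32 mask)
{
	/* strip bits that must be zero on this platform before writing */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(i915, mask));
}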
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b5be0abbba35..5b07766a1c26 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -87,9 +87,12 @@ i915_param_named_unsafe(enable_psr, int, 0600,
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
+i915_param_named_unsafe(force_probe, charp, 0400,
+ "Force probe the driver for specified devices. "
+ "See CONFIG_DRM_I915_FORCE_PROBE for details.");
+
i915_param_named_unsafe(alpha_support, bool, 0400,
- "Enable alpha quality driver support for latest hardware. "
- "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
+ "Deprecated. See i915.force_probe.");
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3f14e9881a0d..a4770ce46bd2 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -59,11 +59,12 @@ struct drm_printer;
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
param(char *, dmc_firmware_path, NULL) \
- param(int, mmio_debug, 0) \
+ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
param(int, reset, 2) \
param(unsigned int, inject_load_failure, 0) \
param(int, fastboot, -1) \
+ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f893c2cbce15..6c9f46fc3e12 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,10 +28,11 @@
#include <drm/drm_drv.h>
+#include "display/intel_fbdev.h"
+
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_selftest.h"
-#include "intel_fbdev.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
@@ -370,6 +371,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_rps = true, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
@@ -417,6 +419,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_rps = true, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
@@ -470,6 +473,7 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
.ppgtt_type = INTEL_PPGTT_FULL,
@@ -565,6 +569,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.ppgtt_type = INTEL_PPGTT_FULL,
@@ -596,8 +601,6 @@ static const struct intel_device_info intel_cherryview_info = {
#define SKL_PLATFORM \
GEN9_FEATURES, \
- /* Display WA #0477 WaDisableIPC: skl */ \
- .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -640,6 +643,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_runtime_pm = 1, \
.display.has_csr = 1, \
.has_rc6 = 1, \
+ .has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
@@ -744,7 +748,7 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1, \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
@@ -756,7 +760,7 @@ static const struct intel_device_info intel_icelake_11_info = {
static const struct intel_device_info intel_elkhartlake_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .is_alpha_support = 1,
+ .require_force_probe = 1,
.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
.ppgtt_size = 36,
};
@@ -850,16 +854,57 @@ static void i915_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+/* is device_id present in the comma-separated list of ids? */
+static bool force_probe(u16 device_id, const char *devices)
+{
+ char *s, *p, *tok;
+ bool ret;
+
+ /* FIXME: transitional */
+ if (i915_modparams.alpha_support) {
+ DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
+ device_id);
+ return true;
+ }
+
+ if (!devices || !*devices)
+ return false;
+
+ /* match everything */
+ if (strcmp(devices, "*") == 0)
+ return true;
+
+ s = kstrdup(devices, GFP_KERNEL);
+ if (!s)
+ return false;
+
+ for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
+ u16 val;
+
+ if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
+ ret = true;
+ break;
+ }
+ }
+
+ kfree(s);
+
+ return ret;
+}
+
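Usage sketch for the parser above, with made-up device ids (the list form matches the i915.force_probe=<id>[,<id>...] module parameter):

static void demo_force_probe(void)
{
	bool hit;

	hit = force_probe(0x4551, "4541,4551");	/* true: id is listed */
	hit = force_probe(0x4551, "*");		/* true: wildcard */
	hit = force_probe(0x4551, "");		/* false: empty list */
	(void)hit;
}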
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
int err;
- if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
- DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
- "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
- "to enable support in this kernel version, or check for kernel updates.\n");
+ if (intel_info->require_force_probe &&
+ !force_probe(pdev->device, i915_modparams.force_probe)) {
+ DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+ "kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
+ "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
+ "or (recommended) check for kernel updates.\n",
+ pdev->device, pdev->device, pdev->device);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index dc4ce694c06a..3d8162d28730 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -195,6 +195,10 @@
#include <linux/sizes.h>
#include <linux/uuid.h>
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_lrc_reg.h"
+
#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
@@ -210,7 +214,6 @@
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"
-#include "intel_lrc_reg.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1202,28 +1205,35 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(&i915->drm);
- if (ret)
- return ERR_PTR(ret);
+ err = i915_mutex_lock_interruptible(&i915->drm);
+ if (err)
+ return ERR_PTR(err);
- /*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
- */
- ce = intel_context_pin(ctx, engine);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ce))
- return ce;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ */
+ err = intel_context_pin(ce);
+ if (err == 0) {
+ i915->perf.oa.pinned_ctx = ce;
+ break;
+ }
+ }
+ i915_gem_context_unlock_engines(ctx);
- i915->perf.oa.pinned_ctx = ce;
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return ERR_PTR(err);
- return ce;
+ return i915->perf.oa.pinned_ctx;
}
/**
@@ -1365,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -1502,7 +1512,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
ret = PTR_ERR(bo);
@@ -1679,7 +1689,7 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
CTX_REG(reg_state,
CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(i915, &ce->sseu));
+ intel_sseu_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1709,7 +1719,6 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1738,30 +1747,43 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = intel_context_lookup(ctx, engine);
- u32 *regs;
-
- /* OA settings will be set upon first use */
- if (!ce || !ce->state)
- continue;
-
- regs = i915_gem_object_pin_map(ce->state->obj, map_type);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx),
+ it) {
+ u32 *regs;
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /* OA settings will be set upon first use */
+ if (!ce->state)
+ continue;
+
+ regs = i915_gem_object_pin_map(ce->state->obj,
+ map_type);
+ if (IS_ERR(regs)) {
+ i915_gem_context_unlock_engines(ctx);
+ return PTR_ERR(regs);
+ }
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+ ce->state->obj->mm.dirty = true;
+ regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ce, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
- i915_gem_object_unpin_map(ce->state->obj);
+ i915_gem_object_unpin_map(ce->state->obj);
+ }
+ i915_gem_context_unlock_engines(ctx);
}
/*
* Apply the configuration by doing one context restore of the edited
* context image.
*/
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2090,7 +2112,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(dev_priv);
+ stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
@@ -2126,7 +2148,7 @@ err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
err_config:
if (stream->ctx)
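Both oa_pin_context() and gen8_configure_all_contexts() above now walk the context's engine array with the same idiom; condensed (demo_find_render() is hypothetical):

static struct intel_context *demo_find_render(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce, *found = NULL;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine->class != RENDER_CLASS)
			continue;

		found = ce;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	return found;
}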
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 46a52da3db29..8fe46ee920a0 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -6,9 +6,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
-#include "i915_pmu.h"
-#include "intel_ringbuffer.h"
+
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
@@ -168,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
wakeref = 0;
if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return;
@@ -204,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
static void
@@ -224,9 +227,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (dev_priv->gt.awake) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
- val = intel_get_cagf(dev_priv,
- I915_READ_NOTRACE(GEN6_RPSTAT1));
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
+ wakeref) {
+ val = intel_uncore_read_notrace(&dev_priv->uncore,
+ GEN6_RPSTAT1);
+ val = intel_get_cagf(dev_priv, val);
+ }
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@@ -438,14 +444,15 @@ static u64 __get_rc6(struct drm_i915_private *i915)
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (wakeref) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
/*
* If we are coming back from being runtime suspended we must
@@ -464,8 +471,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->pmu.lock, flags);
} else {
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/*
* We are runtime suspended.
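The PMU sampling paths above follow one pattern: only touch hardware when a runtime-pm reference can be taken without waking the device. Condensed (demo_sample() is hypothetical):

static u32 demo_sample(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	u32 val;

	/* returns 0 rather than resuming a runtime-suspended device */
	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (!wakeref)
		return 0;

	val = intel_uncore_read_notrace(&i915->uncore, reg);
	intel_runtime_pm_put(rpm, wakeref);

	return val;
}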
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 782183b78f49..7b7016171057 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -37,6 +37,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -48,12 +50,10 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices *
- DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
- eu_length = sseu->max_slices * sseu->max_subslices *
- DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
-
- total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
+ subslice_length = sseu->max_slices * subslice_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ total_length = sizeof(topo) + slice_length + subslice_length +
+ eu_length;
ret = copy_query_item(&topo, sizeof(topo), total_length,
query_item);
@@ -69,10 +69,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
+ topo.subslice_stride = subslice_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride =
- DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+ topo.eu_stride = eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -96,9 +95,58 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
return total_length;
}
+static int
+query_engine_info(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_engine_info __user *query_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_engine_info __user *info_ptr;
+ struct drm_i915_query_engine_info query;
+ struct drm_i915_engine_info info = { };
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int len, ret;
+
+ if (query_item->flags)
+ return -EINVAL;
+
+ len = sizeof(struct drm_i915_query_engine_info) +
+ RUNTIME_INFO(i915)->num_engines *
+ sizeof(struct drm_i915_engine_info);
+
+ ret = copy_query_item(&query, sizeof(query), len, query_item);
+ if (ret != 0)
+ return ret;
+
+ if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
+ query.rsvd[2])
+ return -EINVAL;
+
+ info_ptr = &query_ptr->engines[0];
+
+ for_each_engine(engine, i915, id) {
+ info.engine.engine_class = engine->uabi_class;
+ info.engine.engine_instance = engine->instance;
+ info.capabilities = engine->uabi_capabilities;
+
+ if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ return -EFAULT;
+
+ query.num_engines++;
+ info_ptr++;
+ }
+
+ if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ return -EFAULT;
+
+ return len;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
+ query_engine_info,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
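Userspace consumes the new item with the usual two-pass query protocol: first call with data_ptr unset to learn the size, then again with a buffer. A hedged sketch, assuming the uapi names added alongside this series (error handling trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_engine_info *demo_query_engines(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_ENGINE_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uint64_t)(uintptr_t)&item,
	};
	struct drm_i915_query_engine_info *info;

	/* pass 1: kernel fills item.length with the required size */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	info = calloc(1, item.length);
	if (!info)
		return NULL;
	item.data_ptr = (uint64_t)(uintptr_t)info;

	/* pass 2: kernel copies the header plus one entry per engine */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
		free(info);
		return NULL;
	}

	return info;
}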
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 13d6bd4e17b2..d6483b5dc8e5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -126,7 +126,7 @@
*/
#define REG_BIT(__n) \
((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
((__n) < 0 || (__n) > 31))))
/**
@@ -140,8 +140,8 @@
*/
#define REG_GENMASK(__high, __low) \
((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
- __builtin_constant_p(__low) && \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
+ __is_constexpr(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
/*
@@ -153,7 +153,7 @@
* REG_FIELD_PREP() - Prepare a u32 bitfield value
* @__mask: shifted mask defining the field's length and position
* @__val: value to put in the field
-
+ *
* Local copy of FIELD_PREP() to generate an integer constant expression, force
* u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
*
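Switching from __builtin_constant_p() to __is_constexpr() keeps the whole macro an integer constant expression (so it remains usable in static initializers) while still tripping BUILD_BUG_ON_ZERO() for out-of-range constants. How the three macros compose, mirroring the ENGINE1_MASK use in the i915_irq.c hunk above (the DEMO_* names are illustrative):

#define DEMO_ENGINE1_MASK	REG_GENMASK(31, 16)	/* compile-time range check */

static u32 demo_pack_events(void)
{
	/* REG_FIELD_PREP() shifts the value into the masked field */
	return REG_FIELD_PREP(DEMO_ENGINE1_MASK, GEN11_GUC_INTR_GUC2HOST);
}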
@@ -290,6 +290,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OTHER_CLASS 4
#define MAX_ENGINE_CLASS 4
+#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
#define MAX_ENGINE_INSTANCE 3
@@ -1814,7 +1815,6 @@ enum i915_power_well_id {
#define PWR_DOWN_LN_3 (0x8 << 4)
#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
#define PWR_DOWN_LN_3_1 (0xa << 4)
#define PWR_DOWN_LN_3_1_0 (0xb << 4)
#define PWR_DOWN_LN_MASK (0xf << 4)
@@ -1848,6 +1848,9 @@ enum i915_power_well_id {
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
+#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port))
+#define IREFGEN (1 << 24)
+
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
@@ -2510,6 +2513,13 @@ enum i915_power_well_id {
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
@@ -2696,7 +2706,7 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c)
#define GFX_RUN_LIST_ENABLE (1 << 15)
#define GFX_INTERRUPT_STEERING (1 << 14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
@@ -2871,6 +2881,7 @@ enum i915_power_well_id {
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
+#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY (1 << 3)
#define ECO_FLIP_DONE (1 << 0)
@@ -3159,6 +3170,7 @@ enum i915_power_well_id {
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1 << 0)
@@ -4554,7 +4566,7 @@ enum {
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
-#define SDVO_AUDIO_ENABLE (1 << 6)
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
/* VSYNC/HSYNC bits new with 965, default is to be set */
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
@@ -4694,7 +4706,7 @@ enum {
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
-#define DRM_DIP_ENABLE (1 << 28)
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
#define PSR_VSC_BIT_7_SET (1 << 27)
#define VSC_SELECT_MASK (0x3 << 25)
#define VSC_SELECT_SHIFT 25
@@ -5770,6 +5782,7 @@ enum {
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
@@ -7198,7 +7211,8 @@ enum {
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
+#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@@ -7490,6 +7504,9 @@ enum {
#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -7504,6 +7521,10 @@ enum {
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
#define ILK_DESKTOP (1 << 23)
+#define HSW_CPU_SSC_ENABLE (1 << 21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT (1 << 1)
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
@@ -8149,6 +8170,7 @@ enum {
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
@@ -8162,6 +8184,7 @@ enum {
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
@@ -8187,6 +8210,7 @@ enum {
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -8777,6 +8801,9 @@ enum {
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_READ_OC_PARAMS 0xc
+#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
+#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
+#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
@@ -8866,6 +8893,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
+#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -9013,32 +9041,32 @@ enum {
/* HSW Audio */
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
-#define HSW_AUD_CFG(pipe) _MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
+#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
#define _HSW_AUD_MISC_CTRL_A 0x65010
#define _HSW_AUD_MISC_CTRL_B 0x65110
-#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
-#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
#define AUD_CONFIG_M_MASK 0xfffff
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
#define _HSW_AUD_DIG_CNVT_1 0x65080
#define _HSW_AUD_DIG_CNVT_2 0x65180
-#define AUD_DIG_CNVT(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
+#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
#define _HSW_AUD_EDID_DATA_A 0x65050
#define _HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(pipe) _MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
+#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
@@ -9450,24 +9478,28 @@ enum skl_power_gate {
/* SPLL */
#define SPLL_CTL _MMIO(0x46020)
#define SPLL_PLL_ENABLE (1 << 31)
-#define SPLL_PLL_SSC (1 << 28)
-#define SPLL_PLL_NON_SSC (2 << 28)
-#define SPLL_PLL_LCPLL (3 << 28)
-#define SPLL_PLL_REF_MASK (3 << 28)
-#define SPLL_PLL_FREQ_810MHz (0 << 26)
-#define SPLL_PLL_FREQ_1350MHz (1 << 26)
-#define SPLL_PLL_FREQ_2700MHz (2 << 26)
-#define SPLL_PLL_FREQ_MASK (3 << 26)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC	(1 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
/* WRPLL */
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1 << 31)
-#define WRPLL_PLL_SSC (1 << 28)
-#define WRPLL_PLL_NON_SSC (2 << 28)
-#define WRPLL_PLL_LCPLL (3 << 28)
-#define WRPLL_PLL_REF_MASK (3 << 28)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW	(2 << 28) /* CPU SSC if fuse-enabled, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -9527,11 +9559,16 @@ enum skl_power_gate {
#define TRANS_MSA_12_BPC (3 << 5)
#define TRANS_MSA_16_BPC (4 << 5)
#define TRANS_MSA_CEA_RANGE (1 << 3)
+#define TRANS_MSA_USE_VSC_SDP (1 << 14)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
#define LCPLL_PLL_DISABLE (1 << 31)
#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
#define LCPLL_CLK_FREQ_MASK (3 << 26)
#define LCPLL_CLK_FREQ_450 (0 << 26)
#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
@@ -10148,6 +10185,22 @@ enum skl_power_gate {
#define PRE_CSC_GAMC_INDEX(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_INDEX_A, _PRE_CSC_GAMC_INDEX_B)
#define PRE_CSC_GAMC_DATA(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_DATA_A, _PRE_CSC_GAMC_DATA_B)
+/* ICL Multi segmented gamma */
+#define _PAL_PREC_MULTI_SEG_INDEX_A 0x4A408
+#define _PAL_PREC_MULTI_SEG_INDEX_B 0x4AC08
+#define PAL_PREC_MULTI_SEGMENT_AUTO_INCREMENT REG_BIT(15)
+#define PAL_PREC_MULTI_SEGMENT_INDEX_VALUE_MASK REG_GENMASK(4, 0)
+
+#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
+#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
+
+#define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
+ _PAL_PREC_MULTI_SEG_INDEX_A, \
+ _PAL_PREC_MULTI_SEG_INDEX_B)
+#define PREC_PAL_MULTI_SEG_DATA(pipe) _MMIO_PIPE(pipe, \
+ _PAL_PREC_MULTI_SEG_DATA_A, \
+ _PAL_PREC_MULTI_SEG_DATA_B)
+
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c88e538b2ef4..a195a92d0105 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,16 +29,20 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
-#include "i915_reset.h"
#include "intel_pm.h"
struct execute_cb {
struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
+ void (*hook)(struct i915_request *rq, struct dma_fence *signal);
+ struct i915_request *signal;
};
static struct i915_global_request {
@@ -132,19 +136,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static void reserve_gt(struct drm_i915_private *i915)
-{
- if (!i915->gt.active_requests++)
- i915_gem_unpark(i915);
-}
-
-static void unreserve_gt(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(!i915->gt.active_requests);
- if (!--i915->gt.active_requests)
- i915_gem_park(i915);
-}
-
static void advance_ring(struct i915_request *request)
{
struct intel_ring *ring = request->ring;
@@ -192,84 +183,23 @@ static void free_capture_list(struct i915_request *request)
}
}
-static void __retire_engine_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
- __func__, engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
-
- GEM_BUG_ON(!i915_request_completed(rq));
-
- local_irq_disable();
-
- spin_lock(&engine->timeline.lock);
- GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
- list_del_init(&rq->link);
- spin_unlock(&engine->timeline.lock);
-
- spin_lock(&rq->lock);
- i915_request_mark_complete(rq);
- if (!i915_request_signaled(rq))
- dma_fence_signal_locked(&rq->fence);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
- }
- spin_unlock(&rq->lock);
-
- local_irq_enable();
-
- /*
- * The backing object for the context is done after switching to the
- * *next* context. Therefore we cannot retire the previous context until
- * the next context has already started running. However, since we
- * cannot take the required locks at i915_request_submit() we
- * defer the unpinning of the active context to now, retirement of
- * the subsequent request.
- */
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context);
- engine->last_retired_context = rq->hw_context;
-}
-
-static void __retire_engine_upto(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- struct i915_request *tmp;
-
- if (list_empty(&rq->link))
- return;
-
- do {
- tmp = list_first_entry(&engine->timeline.requests,
- typeof(*tmp), link);
-
- GEM_BUG_ON(tmp->engine != engine);
- __retire_engine_request(engine, tmp);
- } while (tmp != rq);
-}
-
-static void i915_request_retire(struct i915_request *request)
+static bool i915_request_retire(struct i915_request *rq)
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- request->engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ if (!i915_request_completed(rq))
+ return false;
- lockdep_assert_held(&request->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_request_completed(request));
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
+ rq->engine->name,
+ rq->fence.context, rq->fence.seqno,
+ hwsp_seqno(rq));
- trace_i915_request_retire(request);
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+ trace_i915_request_retire(rq);
- advance_ring(request);
- free_capture_list(request);
+ advance_ring(rq);
/*
* Walk through the active list, calling retire on each. This allows
@@ -281,7 +211,7 @@ static void i915_request_retire(struct i915_request *request)
* pass along the auxiliary information (to avoid dereferencing
* the node after the callback).
*/
- list_for_each_entry_safe(active, next, &request->active_list, link) {
+ list_for_each_entry_safe(active, next, &rq->active_list, link) {
/*
* In microbenchmarks or focusing upon time inside the kernel,
* we may spend an inordinate amount of time simply handling
@@ -297,19 +227,40 @@ static void i915_request_retire(struct i915_request *request)
INIT_LIST_HEAD(&active->link);
RCU_INIT_POINTER(active->request, NULL);
- active->retire(active, request);
+ active->retire(active, rq);
}
- i915_request_remove_from_client(request);
+ local_irq_disable();
- intel_context_unpin(request->hw_context);
+ spin_lock(&rq->engine->active.lock);
+ list_del(&rq->sched.link);
+ spin_unlock(&rq->engine->active.lock);
- __retire_engine_upto(request->engine, request);
+ spin_lock(&rq->lock);
+ i915_request_mark_complete(rq);
+ if (!i915_request_signaled(rq))
+ dma_fence_signal_locked(&rq->fence);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ i915_request_cancel_breadcrumb(rq);
+ if (rq->waitboost) {
+ GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+ atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ }
+ spin_unlock(&rq->lock);
- unreserve_gt(request->i915);
+ local_irq_enable();
+
+ intel_context_exit(rq->hw_context);
+ intel_context_unpin(rq->hw_context);
+
+ i915_request_remove_from_client(rq);
+ list_del(&rq->link);
+
+ free_capture_list(rq);
+ i915_sched_node_fini(&rq->sched);
+ i915_request_put(rq);
- i915_sched_node_fini(&request->sched);
- i915_request_put(request);
+ return true;
}
void i915_request_retire_upto(struct i915_request *rq)
@@ -331,9 +282,7 @@ void i915_request_retire_upto(struct i915_request *rq)
do {
tmp = list_first_entry(&ring->request_list,
typeof(*tmp), ring_link);
-
- i915_request_retire(tmp);
- } while (tmp != rq);
+ } while (i915_request_retire(tmp) && tmp != rq);
}
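With i915_request_retire() now returning whether the request was actually retired, batch loops can stop at the first still-busy request instead of asserting completion up front; a hypothetical caller:

static void demo_retire_ring(struct intel_ring *ring)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
		if (!i915_request_retire(rq))
			break;	/* first incomplete request: stop */
}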
static void irq_execute_cb(struct irq_work *wrk)
@@ -344,6 +293,17 @@ static void irq_execute_cb(struct irq_work *wrk)
kmem_cache_free(global.slab_execute_cbs, cb);
}
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ cb->hook(container_of(cb->fence, struct i915_request, submit),
+ &cb->signal->fence);
+ i915_request_put(cb->signal);
+
+ irq_execute_cb(wrk);
+}
+
static void __notify_execute_cb(struct i915_request *rq)
{
struct execute_cb *cb;
@@ -370,14 +330,19 @@ static void __notify_execute_cb(struct i915_request *rq)
}
static int
-i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- gfp_t gfp)
+__i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal),
+ gfp_t gfp)
{
struct execute_cb *cb;
- if (i915_request_is_active(signal))
+ if (i915_request_is_active(signal)) {
+ if (hook)
+ hook(rq, &signal->fence);
return 0;
+ }
cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
if (!cb)
@@ -387,8 +352,18 @@ i915_request_await_execution(struct i915_request *rq,
i915_sw_fence_await(cb->fence);
init_irq_work(&cb->work, irq_execute_cb);
+ if (hook) {
+ cb->hook = hook;
+ cb->signal = i915_request_get(signal);
+ cb->work.func = irq_execute_cb_hook;
+ }
+
spin_lock_irq(&signal->lock);
if (i915_request_is_active(signal)) {
+ if (hook) {
+ hook(rq, &signal->fence);
+ i915_request_put(signal);
+ }
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
} else {
@@ -399,28 +374,17 @@ i915_request_await_execution(struct i915_request *rq,
return 0;
}
-static void move_to_timeline(struct i915_request *request,
- struct i915_timeline *timeline)
-{
- GEM_BUG_ON(request->timeline == &request->engine->timeline);
- lockdep_assert_held(&request->engine->timeline.lock);
-
- spin_lock(&request->timeline->lock);
- list_move_tail(&request->link, &timeline->requests);
- spin_unlock(&request->timeline->lock);
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld -> current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
@@ -443,11 +407,13 @@ void __i915_request_submit(struct i915_request *request)
*/
if (request->sched.semaphores &&
i915_sw_fence_signaled(&request->semaphore))
- request->hw_context->saturated |= request->sched.semaphores;
+ engine->saturated |= request->sched.semaphores;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ list_move_tail(&request->sched.link, &engine->active.requests);
+
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
@@ -463,8 +429,7 @@ void __i915_request_submit(struct i915_request *request)
engine->emit_fini_breadcrumb(request,
request->ring->vaddr + request->postfix);
- /* Transfer from per-context onto the global per-engine timeline */
- move_to_timeline(request, &engine->timeline);
+ engine->serial++;
trace_i915_request_execute(request);
}
@@ -475,11 +440,11 @@ void i915_request_submit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
@@ -492,7 +457,7 @@ void __i915_request_unsubmit(struct i915_request *request)
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
/*
* Only unwind in reverse order, required so that the per-context list
@@ -510,8 +475,11 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_unlock(&request->lock);
- /* Transfer back from the global per-engine timeline to per-context */
- move_to_timeline(request, request->timeline);
+ /* We've already spun, don't charge on resubmitting. */
+ if (request->sched.semaphores && i915_request_started(request)) {
+ request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+ request->sched.semaphores = 0;
+ }
/*
* We don't need to wake_up any waiters on request->execute, they
@@ -528,11 +496,11 @@ void i915_request_unsubmit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_unsubmit(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static int __i915_sw_fence_call
@@ -588,16 +556,13 @@ static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
- if (!i915_request_completed(rq))
+ list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ if (!i915_request_retire(rq))
break;
-
- i915_request_retire(rq);
- }
}
static noinline struct i915_request *
-i915_request_alloc_slow(struct intel_context *ce)
+request_alloc_slow(struct intel_context *ce, gfp_t gfp)
{
struct intel_ring *ring = ce->ring;
struct i915_request *rq;
@@ -605,6 +570,18 @@ i915_request_alloc_slow(struct intel_context *ce)
if (list_empty(&ring->request_list))
goto out;
+ if (!gfpflags_allow_blocking(gfp))
+ goto out;
+
+ /* Move our oldest request to the slab-cache (if not in use!) */
+ rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ i915_request_retire(rq);
+
+ rq = kmem_cache_alloc(global.slab_requests,
+ gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (rq)
+ return rq;
+
/* Ratelimit ourselves to prevent oom from malicious clients */
rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
cond_synchronize_rcu(rq->rcustate);
@@ -613,62 +590,21 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
- return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_requests, gfp);
}
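The slow path now escalates reclaim in stages. A hedged user-space model of the strategy (all names illustrative): retire the oldest finished item, retry the allocator once, and only then pay for the heavyweight wait-and-retire before the final attempt.

#include <stddef.h>

static void *alloc_slow(void *(*try_alloc)(void),
			void (*reclaim_oldest)(void),
			void (*reclaim_all)(void))
{
	void *p;

	reclaim_oldest();	/* cheap: retire just the oldest request */
	p = try_alloc();
	if (p)
		return p;

	reclaim_all();		/* expensive: RCU sync + retire everything */
	return try_alloc();	/* final, possibly blocking, attempt */
}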
-/**
- * i915_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
struct i915_request *
-i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
+__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
- struct i915_timeline *tl;
+ struct i915_timeline *tl = ce->ring->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- /*
- * Preempt contexts are reserved for exclusive use to inject a
- * preemption context switch. They are never to be used for any trivial
- * request!
- */
- GEM_BUG_ON(ctx == i915->preempt_context);
-
- /*
- * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged.
- */
- ret = i915_terminally_wedged(i915);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- ce = intel_context_pin(ctx, engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- reserve_gt(i915);
- mutex_lock(&ce->ring->timeline->mutex);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
- /* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
- i915_request_completed(rq))
- i915_request_retire(rq);
+ /* Check that the caller provided an already pinned context */
+ __intel_context_pin(ce);
/*
* Beware: Dragons be flying overhead.
@@ -700,30 +636,25 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* Do not use kmem_cache_zalloc() here!
*/
rq = kmem_cache_alloc(global.slab_requests,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = i915_request_alloc_slow(ce);
+ rq = request_alloc_slow(ce, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- INIT_LIST_HEAD(&rq->active_list);
- INIT_LIST_HEAD(&rq->execute_cb);
-
- tl = ce->ring->timeline;
ret = i915_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
- rq->i915 = i915;
- rq->engine = engine;
- rq->gem_context = ctx;
+ rq->i915 = ce->engine->i915;
rq->hw_context = ce;
+ rq->gem_context = ce->gem_context;
+ rq->engine = ce->engine;
rq->ring = ce->ring;
rq->timeline = tl;
- GEM_BUG_ON(rq->timeline == &engine->timeline);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -743,6 +674,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->batch = NULL;
rq->capture_list = NULL;
rq->waitboost = false;
+ rq->execution_mask = ALL_ENGINES;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
/*
* Reserve space in the ring buffer for all the commands required to
@@ -756,7 +691,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* around inside i915_request_add() there is sufficient space at
* the beginning of the ring as well.
*/
- rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
+ rq->reserved_space =
+ 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -766,20 +702,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- ret = engine->request_alloc(rq);
+ ret = rq->engine->request_alloc(rq);
if (ret)
goto err_unwind;
- /* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(ce);
-
rq->infix = rq->ring->emit; /* end of header; start of user payload */
- /* Check that we didn't interrupt ourselves with a new request */
- lockdep_assert_held(&rq->timeline->mutex);
- GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
- rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
-
+ intel_context_mark_active(ce);
return rq;
err_unwind:
@@ -793,12 +722,41 @@ err_unwind:
err_free:
kmem_cache_free(global.slab_requests, rq);
err_unreserve:
- mutex_unlock(&ce->ring->timeline->mutex);
- unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
+struct i915_request *
+i915_request_create(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_timeline_lock(ce);
+ if (err)
+ return ERR_PTR(err);
+
+ /* Move our oldest request to the slab-cache (if not in use!) */
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+ i915_request_retire(rq);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_KERNEL);
+ intel_context_exit(ce); /* active reference transferred to request */
+ if (IS_ERR(rq))
+ goto err_unlock;
+
+ /* Check that we do not interrupt ourselves with a new request */
+ rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+
+ return rq;
+
+err_unlock:
+ intel_context_timeline_unlock(ce);
+ return rq;
+}
+
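With construction split out of the old i915_request_alloc(), callers hold a pinned context and let i915_request_create() manage the timeline lock. A hedged sketch of the intended flow (error handling abbreviated; assumes @ce is already pinned):

	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_create(ce);	/* locks ce's timeline on success */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);	/* commit anyway; unlocks the timeline */
		return PTR_ERR(cs);
	}
	/* ... emit up to 4 dwords into cs ... */

	i915_request_add(rq);		/* commits and unlocks the timeline */
	return 0;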
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
@@ -829,7 +787,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->hw_context->saturated;
+ return rq->sched.semaphores | rq->engine->saturated;
}
static int
@@ -854,13 +812,13 @@ emit_semaphore_wait(struct i915_request *to,
if (err < 0)
return err;
- /* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ /* Only submit our spinner after the signaler is running! */
+ err = __i915_request_await_execution(to, from, NULL, gfp);
if (err)
return err;
- /* Only submit our spinner after the signaler is running! */
- err = i915_request_await_execution(to, from, gfp);
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
@@ -991,6 +949,52 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return 0;
}
+int
+i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
+{
+ struct dma_fence **child = &fence;
+ unsigned int nchild = 1;
+ int ret;
+
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ /* XXX Error for signal-on-any fence arrays */
+
+ child = array->fences;
+ nchild = array->num_fences;
+ GEM_BUG_ON(!nchild);
+ }
+
+ do {
+ fence = *child++;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+
+ /*
+ * We don't squash repeated fence dependencies here as we
+ * want to run our callback in all cases.
+ */
+
+ if (dma_fence_is_i915(fence))
+ ret = __i915_request_await_execution(rq,
+ to_request(fence),
+ hook,
+ I915_FENCE_GFP);
+ else
+ ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ } while (--nchild);
+
+ return 0;
+}
+
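i915_request_await_execution() is now exported and takes an optional hook invoked when the signaling request begins execution, rather than when it completes. A hedged usage sketch; bond_hook and its body are hypothetical:

static void bond_hook(struct i915_request *rq, struct dma_fence *signal)
{
	/* runs once @signal's request is submitted to hardware */
}

	/* while constructing @rq against an external @fence: */
	err = i915_request_await_execution(rq, fence, bond_hook);
	if (err)
		return err;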
/**
* i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
@@ -1023,7 +1027,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1040,7 +1044,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->resv);
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
}
if (excl) {
@@ -1100,8 +1104,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = i915_active_request_raw(&timeline->last_request,
- &rq->i915->drm.struct_mutex);
+ prev = rcu_dereference_protected(timeline->last_request.request, 1);
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1118,10 +1121,13 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
- spin_lock_irq(&timeline->lock);
list_add_tail(&rq->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
+ /*
+ * Make sure that no request gazumped us - if it was allocated after
+ * our __i915_request_create() and called __i915_request_commit() before
+ * us, the timeline will hold its seqno which is later than ours.
+ */
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
__i915_active_request_set(&timeline->last_request, rq);
@@ -1133,36 +1139,23 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void i915_request_add(struct i915_request *request)
+struct i915_request *__i915_request_commit(struct i915_request *rq)
{
- struct intel_engine_cs *engine = request->engine;
- struct i915_timeline *timeline = request->timeline;
- struct intel_ring *ring = request->ring;
+ struct intel_engine_cs *engine = rq->engine;
+ struct intel_ring *ring = rq->ring;
struct i915_request *prev;
u32 *cs;
GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, request->fence.context, request->fence.seqno);
-
- lockdep_assert_held(&request->timeline->mutex);
- lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
-
- trace_i915_request_add(request);
-
- /*
- * Make sure that no request gazumped us - if it was allocated after
- * our i915_request_alloc() and called __i915_request_add() before
- * us, the timeline will hold its seqno which is later than ours.
- */
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
+ engine->name, rq->fence.context, rq->fence.seqno);
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- GEM_BUG_ON(request->reserved_space > request->ring->space);
- request->reserved_space = 0;
+ GEM_BUG_ON(rq->reserved_space > ring->space);
+ rq->reserved_space = 0;
/*
* Record the position of the start of the breadcrumb so that
@@ -1170,17 +1163,16 @@ void i915_request_add(struct i915_request *request)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
+ cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
GEM_BUG_ON(IS_ERR(cs));
- request->postfix = intel_ring_offset(request, cs);
+ rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(request);
+ prev = __i915_request_add_to_timeline(rq);
- list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
- list_add(&ring->active_link, &request->i915->gt.active_rings);
- request->i915->gt.active_engines |= request->engine->mask;
- request->emitted_jiffies = jiffies;
+ list_add_tail(&rq->ring_link, &ring->request_list);
+ if (list_is_first(&rq->ring_link, &ring->request_list))
+ list_add(&ring->active_link, &rq->i915->gt.active_rings);
+ rq->emitted_jiffies = jiffies;
/*
* Let the backend know a new request has arrived that may need
@@ -1194,10 +1186,10 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
- i915_sw_fence_commit(&request->semaphore);
+ i915_sw_fence_commit(&rq->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
- struct i915_sched_attr attr = request->gem_context->sched;
+ struct i915_sched_attr attr = rq->gem_context->sched;
/*
* Boost actual workloads past semaphores!
@@ -1211,7 +1203,7 @@ void i915_request_add(struct i915_request *request)
* far in the distance past over useful work, we keep a history
* of any semaphore use along our dependency chain.
*/
- if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
/*
@@ -1220,15 +1212,29 @@ void i915_request_add(struct i915_request *request)
* Allow interactive/synchronous clients to jump ahead of
* the bulk clients. (FQ_CODEL)
*/
- if (list_empty(&request->sched.signalers_list))
+ if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
- engine->schedule(request, &attr);
+ engine->schedule(rq, &attr);
}
rcu_read_unlock();
- i915_sw_fence_commit(&request->submit);
+ i915_sw_fence_commit(&rq->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+ return prev;
+}
+
+void i915_request_add(struct i915_request *rq)
+{
+ struct i915_request *prev;
+
+ lockdep_assert_held(&rq->timeline->mutex);
+ lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
/*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it
@@ -1249,7 +1255,7 @@ void i915_request_add(struct i915_request *request)
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
- mutex_unlock(&request->timeline->mutex);
+ mutex_unlock(&rq->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1354,10 +1360,6 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
- * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
- * in via the flags, and vice versa if the struct_mutex is not held, the caller
- * must not specify that the wait is locked.
- *
* Returns the remaining time (in jiffies) if the request completed, which may
* be zero or -ETIME if the request is unfinished after the timeout expires.
* May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
@@ -1374,7 +1376,7 @@ long i915_request_wait(struct i915_request *rq,
might_sleep();
GEM_BUG_ON(timeout < 0);
- if (i915_request_completed(rq))
+ if (dma_fence_is_signaled(&rq->fence))
return timeout;
if (!timeout)
@@ -1382,9 +1384,43 @@ long i915_request_wait(struct i915_request *rq,
trace_i915_request_wait_begin(rq, flags);
- /* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(rq, state, 5))
+ /*
+ * We must never wait on the GPU while holding a lock as we
+ * may need to perform a GPU reset. So while we don't need to
+ * serialise wait/reset with an explicit lock, we do want
+ * lockdep to detect potential dependency cycles.
+ */
+ mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
+ 0, 0, _THIS_IP_);
+
+ /*
+ * Optimistic spin before touching IRQs.
+ *
+ * We may use a rather large value here to offset the penalty of
+ * switching away from the active task. Frequently, the client will
+ * wait upon an old swapbuffer to throttle itself to remain within a
+ * frame of the gpu. If the client is running in lockstep with the gpu,
+ * then it should not be waiting long at all, and a sleep now will incur
+ * extra scheduler latency in producing the next frame. To try to
+ * avoid adding the cost of enabling/disabling the interrupt to the
+ * short wait, we first spin to see if the request would have completed
+ * in the time taken to setup the interrupt.
+ *
+ * We need up to 5us to enable the irq, and up to 20us to hide the
+ * scheduler latency of a context switch, ignoring the secondary
+ * impacts from a context switch such as cache eviction.
+ *
+ * The scheme used for low-latency IO is called "hybrid interrupt
+ * polling". The suggestion there is to sleep until just before you
+ * expect to be woken by the device interrupt and then poll for its
+ * completion. That requires having a good predictor for the request
+ * duration, which we currently lack.
+ */
+ if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ dma_fence_signal(&rq->fence);
goto out;
+ }
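The spin budget is now Kconfig-driven (CONFIG_DRM_I915_SPIN_REQUEST), following the hybrid-polling idea above: burn a few microseconds polling before paying for interrupt setup and a context switch. A user-space model of the spin phase (timings illustrative; C11):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_bool done;

static bool spin_for_ns(long budget_ns)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (atomic_load(&done))
			return true;	/* completed within the spin budget */
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000000L +
		 (now.tv_nsec - start.tv_nsec) < budget_ns);
	return false;			/* arm the interrupt and sleep instead */
}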
/*
* This client is about to stall waiting for the GPU. In many cases
@@ -1401,9 +1437,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq);
- local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
- local_bh_enable(); /* kick tasklets en masse */
}
wait.tsk = current;
@@ -1433,25 +1467,25 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
+ mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
-void i915_retire_requests(struct drm_i915_private *i915)
+bool i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_ring *ring, *tmp;
lockdep_assert_held(&i915->drm.struct_mutex);
- if (!i915->gt.active_requests)
- return;
-
list_for_each_entry_safe(ring, tmp,
&i915->gt.active_rings, active_link) {
intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
intel_ring_put(ring);
}
+
+ return !list_empty(&i915->gt.active_rings);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index a982664618c2..edbbdfec24ab 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,8 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gt/intel_engine_types.h"
+
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
@@ -156,6 +158,7 @@ struct i915_request {
*/
struct i915_sched_node sched;
struct i915_dependency dep;
+ intel_engine_mask_t execution_mask;
/*
* A convenience pointer to the current breadcrumb value stored in
@@ -214,7 +217,7 @@ struct i915_request {
bool waitboost;
- /** engine->request_list entry for this request */
+ /** timeline->request entry for this request */
struct list_head link;
/** ring->request_list entry for this request */
@@ -240,8 +243,12 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
}
struct i915_request * __must_check
-i915_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+__i915_request_create(struct intel_context *ce, gfp_t gfp);
+struct i915_request * __must_check
+i915_request_create(struct intel_context *ce);
+
+struct i915_request *__i915_request_commit(struct i915_request *request);
+
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -276,6 +283,10 @@ int i915_request_await_object(struct i915_request *to,
bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
+int i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal));
void i915_request_add(struct i915_request *rq);
@@ -418,6 +429,6 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
-void i915_retire_requests(struct drm_i915_private *i915);
+bool i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
new file mode 100644
index 000000000000..cc6b3846a8c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_scatterlist.h"
+
+bool i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return false;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
+ return false;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ sg_dma_address(new_sg) = sg_dma_address(sg);
+ sg_dma_len(new_sg) = sg_dma_len(sg);
+
+ new_sg = sg_next(new_sg);
+ }
+ GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/scatterlist.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
new file mode 100644
index 000000000000..6617963df9ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef I915_SCATTERLIST_H
+#define I915_SCATTERLIST_H
+
+#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
+
+#include "i915_gem.h"
+
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+ struct scatterlist *sgp;
+ union {
+ unsigned long pfn;
+ dma_addr_t dma;
+ };
+ unsigned int curr;
+ unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+ struct sgt_iter s = { .sgp = sgl };
+
+ if (s.sgp) {
+ s.max = s.curr = s.sgp->offset;
+ s.max += s.sgp->length;
+ if (dma)
+ s.dma = sg_dma_address(s.sgp);
+ else
+ s.pfn = page_to_pfn(sg_page(s.sgp));
+ }
+
+ return s;
+}
+
+static inline int __sg_page_count(const struct scatterlist *sg)
+{
+ return sg->length >> PAGE_SHIFT;
+}
+
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * If the entry is the last, return NULL; otherwise, step to the next
+ * element in the array (@sg@+1). If that is a chain pointer, follow it;
+ * otherwise return the pointer to that next element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
+}
+
+/**
+ * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap: DMA address (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ * @__step: step size
+ */
+#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
+ ((__dmap) = (__iter).dma + (__iter).curr); \
+ (((__iter).curr += (__step)) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp: page pointer (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
+ ((__pp) = (__iter).pfn == 0 ? NULL : \
+ pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+
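A hedged usage sketch of the iterator (assumes a populated struct sg_table *sgt): counting the pages backing an object.

	struct sgt_iter iter;
	struct page *page;
	unsigned long n = 0;

	for_each_sgt_page(page, iter, sgt)
		n++;	/* visits each PAGE_SIZE chunk of every segment */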
+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+ unsigned int page_sizes;
+
+ page_sizes = 0;
+ while (sg) {
+ GEM_BUG_ON(sg->offset);
+ GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+ page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ return page_sizes;
+}
+
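Because every segment length is page-aligned, OR-ing the lengths yields a mask from which the driver derives the GTT page sizes it can use: segments of 4 KiB and 64 KiB give 0x1000 | 0x10000 = 0x11000. A user-space illustration:

#include <stdio.h>

int main(void)
{
	unsigned int lengths[] = { 0x1000, 0x10000, 0x1000 };
	unsigned int mask = 0, i;

	for (i = 0; i < 3; i++)
		mask |= lengths[i];
	printf("page_sizes mask = %#x\n", mask);	/* 0x11000 */
	return 0;
}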
+static inline unsigned int i915_sg_segment_size(void)
+{
+ unsigned int size = swiotlb_max_segment();
+
+ if (size == 0)
+ return SCATTERLIST_MAX_SEGMENT;
+
+ size = rounddown(size, PAGE_SIZE);
+ /* swiotlb_max_segment() can return 1 byte when it means one page. */
+ if (size < PAGE_SIZE)
+ size = PAGE_SIZE;
+
+ return size;
+}
+
+bool i915_sg_trim(struct sg_table *orig_st);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 108f52e1bf35..2e9b38bdc33c 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -77,7 +77,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
bool first = true;
int idx, i;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
assert_priolists(execlists);
/* buckets sorted from highest [in slot 0] to lowest priority */
@@ -150,29 +150,49 @@ sched_lock_engine(const struct i915_sched_node *node,
struct intel_engine_cs *locked,
struct sched_cache *cache)
{
- struct intel_engine_cs *engine = node_to_request(node)->engine;
+ const struct i915_request *rq = node_to_request(node);
+ struct intel_engine_cs *engine;
GEM_BUG_ON(!locked);
- if (engine != locked) {
- spin_unlock(&locked->timeline.lock);
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+ while (locked != (engine = READ_ONCE(rq->engine))) {
+ spin_unlock(&locked->active.lock);
memset(cache, 0, sizeof(*cache));
- spin_lock(&engine->timeline.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
}
- return engine;
+ GEM_BUG_ON(locked != engine);
+ return locked;
}
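The lock-then-revalidate ploy generalizes to any field that may change until its protecting lock is held. A user-space model with pthreads (names illustrative):

#include <pthread.h>

struct owner {
	pthread_mutex_t lock;
};

struct node {
	struct owner *owner;	/* only stable while owner->lock is held */
};

/* Caller holds locked->lock; returns with the node's current owner locked. */
static struct owner *lock_owner(struct node *n, struct owner *locked)
{
	struct owner *o;

	while (locked != (o = __atomic_load_n(&n->owner, __ATOMIC_RELAXED))) {
		pthread_mutex_unlock(&locked->lock);
		pthread_mutex_lock(&o->lock);
		locked = o;
	}
	return locked;
}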
-static bool inflight(const struct i915_request *rq,
- const struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
{
- const struct i915_request *active;
+ return rq->sched.attr.priority | __NO_PREEMPTION;
+}
- if (!i915_request_is_active(rq))
- return false;
+static void kick_submission(struct intel_engine_cs *engine, int prio)
+{
+ const struct i915_request *inflight =
+ port_request(engine->execlists.port);
+
+ /*
+ * Skip the kick if we are already the currently executing
+ * context, or if we expect nothing to change as a result of
+ * running the tasklet, i.e. we have not changed the priority
+ * queue sufficiently to oust the running context.
+ */
+ if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+ return;
- active = port_request(engine->execlists.port);
- return active->hw_context == rq->hw_context;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -189,10 +209,10 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (node_signaled(node))
+ if (prio <= READ_ONCE(node->attr.priority))
return;
- if (prio <= READ_ONCE(node->attr.priority))
+ if (node_signaled(node))
return;
stack.signaler = node;
@@ -258,28 +278,26 @@ static void __i915_schedule(struct i915_sched_node *node,
memset(&cache, 0, sizeof(cache));
engine = node_to_request(node)->engine;
- spin_lock(&engine->timeline.lock);
+ spin_lock(&engine->active.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
+ engine = sched_lock_engine(node, engine, &cache);
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
INIT_LIST_HEAD(&dep->dfs_link);
node = dep->signaler;
engine = sched_lock_engine(node, engine, &cache);
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
/* Recheck after acquiring the engine->active.lock */
if (prio <= node->attr.priority || node_signaled(node))
continue;
+ GEM_BUG_ON(node_to_request(node)->engine != engine);
+
node->attr.priority = prio;
- if (!list_empty(&node->link)) {
- if (!cache.priolist)
- cache.priolist =
- i915_sched_lookup_priolist(engine,
- prio);
- list_move_tail(&node->link, cache.priolist);
- } else {
+
+ if (list_empty(&node->link)) {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
@@ -288,8 +306,16 @@ static void __i915_schedule(struct i915_sched_node *node,
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
- if (!i915_sw_fence_done(&node_to_request(node)->submit))
- continue;
+ continue;
+ }
+
+ if (!intel_engine_is_virtual(engine) &&
+ !i915_request_is_active(node_to_request(node))) {
+ if (!cache.priolist)
+ cache.priolist =
+ i915_sched_lookup_priolist(engine,
+ prio);
+ list_move_tail(&node->link, cache.priolist);
}
if (prio <= engine->execlists.queue_priority_hint)
@@ -297,18 +323,11 @@ static void __i915_schedule(struct i915_sched_node *node,
engine->execlists.queue_priority_hint = prio;
- /*
- * If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves.
- */
- if (inflight(node_to_request(node), engine))
- continue;
-
/* Defer (tasklet) submission until after all of our updates. */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ kick_submission(engine, prio);
}
- spin_unlock(&engine->timeline.lock);
+ spin_unlock(&engine->active.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -422,8 +441,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
- GEM_BUG_ON(!list_empty(&node->link));
-
spin_lock_irq(&schedule_lock);
/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243acf553..7eefccff39bf 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -52,4 +52,22 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+static inline bool i915_scheduler_need_preempt(int prio, int active)
+{
+ /*
+ * Allow preemption of low -> normal -> high, but do not allow
+ * low priority tasks to preempt other low priority tasks: the
+ * assumption is that latency matters less for low priority
+ * tasks than background throughput does, so keep it simple
+ * (KISS).
+ *
+ * More naturally we would write
+ * prio >= max(0, active);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ return prio > max(I915_PRIORITY_NORMAL - 1, active);
+}
+
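A user-space check of the predicate's corner cases (assuming I915_PRIORITY_NORMAL is 0, as an illustration):

#include <stdio.h>

#define PRIO_NORMAL 0	/* assumed stand-in for I915_PRIORITY_NORMAL */

static int imax(int a, int b) { return a > b ? a : b; }

static int need_preempt(int prio, int active)
{
	return prio > imax(PRIO_NORMAL - 1, active);
}

int main(void)
{
	printf("%d\n", need_preempt(-10, -20));	/* 0: low never preempts low */
	printf("%d\n", need_preempt(0, -20));	/* 1: normal preempts low */
	printf("%d\n", need_preempt(5, 5));	/* 0: equal priority stays FIFO */
	return 0;
}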
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 4f2b2eb7c3e5..3e309631bd0b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -9,8 +9,8 @@
#include <linux/list.h>
+#include "gt/intel_engine_types.h"
#include "i915_priolist_types.h"
-#include "intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 95f3dab1b229..a08d7d16621b 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -26,9 +26,11 @@
#include <drm/i915_drm.h>
+#include "display/intel_fbc.h"
+#include "display/intel_gmbus.h"
+
#include "i915_reg.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
@@ -144,7 +146,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 41313005af42..ecac1c386109 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -29,8 +29,11 @@
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sideband.h"
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
@@ -45,7 +48,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
intel_wakeref_t wakeref;
u64 res = 0;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
@@ -259,25 +262,23 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
intel_wakeref_t wakeref;
- int ret;
+ u32 freq;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 freq;
+ vlv_punit_get(dev_priv);
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ vlv_punit_put(dev_priv);
+
+ freq = (freq >> 8) & 0xff;
} else {
- ret = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv,
- I915_READ(GEN6_RPSTAT1)));
+ freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
}
- mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
@@ -318,12 +319,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (val != rps->boost_freq) {
rps->boost_freq = val;
boost = atomic_read(&rps->num_waiters);
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
if (boost)
schedule_work(&rps->work);
@@ -363,18 +364,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->pcu_lock);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
-
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
if (val > rps->rp0_freq)
@@ -392,9 +390,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->pcu_lock);
-
- intel_runtime_pm_put(dev_priv, wakeref);
+unlock:
+ mutex_unlock(&rps->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -422,18 +420,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->pcu_lock);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
-
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
rps->min_freq_softlimit = val;
@@ -447,9 +442,9 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->pcu_lock);
-
- intel_runtime_pm_put(dev_priv, wakeref);
+unlock:
+ mutex_unlock(&rps->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 5fbea0892f33..c311ce9c6f9d 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -61,7 +61,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irq(&gt->hwsp_lock);
/* hwsp_free_list only contains HWSP that have available cachelines */
hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
@@ -69,7 +69,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp) {
struct i915_vma *vma;
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irq(&gt->hwsp_lock);
hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
if (!hwsp)
@@ -86,7 +86,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
hwsp->free_bitmap = ~0ull;
hwsp->gt = gt;
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irq(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
}
@@ -96,7 +96,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp->free_bitmap)
list_del(&hwsp->free_link);
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irq(&gt->hwsp_lock);
GEM_BUG_ON(hwsp->vma->private != hwsp);
return hwsp->vma;
@@ -105,8 +105,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
struct i915_gt_timelines *gt = hwsp->gt;
+ unsigned long flags;
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irqsave(&gt->hwsp_lock, flags);
/* As a cacheline becomes available, publish the HWSP on the freelist */
if (!hwsp->free_bitmap)
@@ -122,7 +123,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
kfree(hwsp);
}
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
@@ -250,7 +251,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
timeline->fence_context = dma_fence_context_alloc(1);
- spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->last_request);
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 27668a1a69a3..36e5e5a65155 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -36,25 +36,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
-static inline void
-i915_timeline_set_subclass(struct i915_timeline *timeline,
- unsigned int subclass)
-{
- lockdep_set_subclass(&timeline->lock, subclass);
-
- /*
- * Due to an interesting quirk in lockdep's internal debug tracking,
- * after setting a subclass we must ensure the lock is used. Otherwise,
- * nr_unused_locks is incremented once too often.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- local_irq_disable();
- lock_map_acquire(&timeline->lock.dep_map);
- lock_map_release(&timeline->lock.dep_map);
- local_irq_enable();
-#endif
-}
-
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
struct i915_vma *global_hwsp);
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 5256a0b5c5f7..fce5cb4f1090 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -23,9 +23,6 @@ struct i915_timeline {
u64 fence_context;
u32 seqno;
- spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
struct mutex mutex; /* protects the flow of requests */
unsigned int pin_count;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 12893304c8f8..f4ce643b3bc3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,9 +8,11 @@
#include <drm/drm_drv.h>
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_drv.h"
-#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -861,10 +863,9 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
@@ -975,7 +976,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
+ __entry->vm = ctx->vm;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2dbe8933b50a..2987219a6300 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,12 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#include <linux/list.h>
+#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
@@ -73,6 +79,39 @@
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
+static inline bool
+__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
+{
+ size_t sz;
+
+ if (check_mul_overflow(count, arr, &sz))
+ return false;
+
+ if (check_add_overflow(sz, base, &sz))
+ return false;
+
+ *size = sz;
+ return true;
+}
+
+/**
+ * check_struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ * @sz: Total size of structure and array
+ *
+ * Calculates the size of memory needed for structure @p followed by an
+ * array of @n @member elements, like struct_size(), but reports
+ * whether the calculation overflowed and returns the resultant size in @sz.
+ *
+ * Return: false if the calculation overflowed.
+ */
+#define check_struct_size(p, member, n, sz) \
+ likely(__check_struct_size(sizeof(*(p)), \
+ sizeof(*(p)->member) + __must_be_array((p)->member), \
+ n, sz))
+
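A hedged usage sketch of the overflow-checked sizing (struct layout and nfences are hypothetical):

	struct fence_array {
		unsigned int count;
		struct dma_fence *fences[];
	} *arr;
	size_t size;

	if (!check_struct_size(arr, fences, nfences, &size))
		return -EINVAL;

	arr = kmalloc(size, GFP_KERNEL);
	if (!arr)
		return -ENOMEM;
	arr->count = nfences;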
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
(typeof(ptr))(__v & -BIT(n)); \
@@ -97,6 +136,8 @@
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+#define struct_member(T, member) (((T *)0)->member)
+
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
#define fetch_and_zero(ptr) ({ \
@@ -113,7 +154,7 @@
*/
#define container_of_user(ptr, type, member) ({ \
void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
!__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type __user *)(__mptr - offsetof(type, member))); })
@@ -152,8 +193,6 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
-#include <linux/list.h>
-
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
@@ -174,6 +213,148 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
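A hedged usage sketch of the pattern the comment describes, e.g. enforcing a minimum panel power-cycle delay (names illustrative):

	unsigned long power_off_time;

	power_off_time = jiffies;	/* record at event A (power off) */

	/* ... unrelated work may happen here ... */

	/* just before event B (power on): sleep out whatever remains */
	wait_remaining_ms_from_jiffies(power_off_time, 250);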
+/**
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
+ } \
+ ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
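A hedged usage sketch of the helper (register and bit names hypothetical): poll a status register for up to 10 ms, with exponential backoff between reads handled by the macro.

	if (wait_for((I915_READ(HYPOTHETICAL_STATUS_REG) & DONE_BIT) != 0, 10))
		DRM_ERROR("timed out waiting for DONE\n");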
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
+ } \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10, 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
+ ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 961268f66c63..a57729be8312 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,14 +22,15 @@
*
*/
-#include "i915_vma.h"
+#include <drm/drm_gem.h>
+
+#include "display/intel_frontbuffer.h"
+
+#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_globals.h"
-#include "intel_ringbuffer.h"
-#include "intel_frontbuffer.h"
-
-#include <drm/drm_gem.h>
+#include "i915_vma.h"
static struct i915_global_vma {
struct i915_global base;
@@ -79,11 +80,11 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long flags;
- spin_lock(&i915->mm.obj_lock);
- if (obj->bind_count)
- list_move_tail(&obj->mm.link, &i915->mm.bound_list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
obj->mm.dirty = true; /* be paranoid */
}
@@ -98,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref)
return;
/* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->resv)) {
- if (reservation_object_test_signaled_rcu(obj->resv, true))
- reservation_object_add_excl_fence(obj->resv, NULL);
- reservation_object_unlock(obj->resv);
+ if (reservation_object_trylock(obj->base.resv)) {
+ if (reservation_object_test_signaled_rcu(obj->base.resv, true))
+ reservation_object_add_excl_fence(obj->base.resv, NULL);
+ reservation_object_unlock(obj->base.resv);
}
/*
@@ -109,12 +110,10 @@ static void __i915_vma_retire(struct i915_active *ref)
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
- obj_bump_mru(obj);
+ if (i915_gem_object_is_shrinkable(obj))
+ obj_bump_mru(obj);
- if (i915_gem_object_has_active_reference(obj)) {
- i915_gem_object_clear_active_reference(obj);
- i915_gem_object_put(obj);
- }
+ i915_gem_object_put(obj); /* and drop the active reference */
}
static struct i915_vma *
@@ -132,16 +131,18 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
-
vma->vm = vm;
vma->ops = &vm->vma_ops;
vma->obj = obj;
- vma->resv = obj->resv;
+ vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+ INIT_ACTIVE_REQUEST(&vma->last_fence);
+
+ INIT_LIST_HEAD(&vma->closed_link);
+
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
@@ -155,6 +156,9 @@ vma_create(struct drm_i915_gem_object *obj,
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
+ vma->size = intel_remapped_info_size(&view->remapped);
+ vma->size <<= PAGE_SHIFT;
}
}
@@ -360,7 +364,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(vma->vm->i915);
+ assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
@@ -439,7 +443,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
if (flags & I915_VMA_RELEASE_MAP)
i915_gem_object_unpin_map(obj);
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
@@ -476,13 +480,6 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
- /*
- * Explicitly disable for rotated VMA since the display does not
- * need the fence and the VMA is not accessible to other users.
- */
- if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
- return;
-
fenceable = (vma->node.size >= vma->fence_size &&
IS_ALIGNED(vma->node.start, vma->fence_alignment));
@@ -538,7 +535,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
* assume that no one else is pinning the pages, but as a rough assertion
* that we will not run into problems later, this will do!)
*/
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}
/**
@@ -680,14 +677,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
mutex_unlock(&vma->vm->mutex);
if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
-
- assert_bind_count(obj);
+ atomic_inc(&vma->obj->bind_count);
+ assert_bind_count(vma->obj);
}
return 0;
@@ -703,8 +694,6 @@ err_unpin:
static void
i915_vma_remove(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -722,10 +711,7 @@ i915_vma_remove(struct i915_vma *vma)
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
- spin_lock(&i915->mm.obj_lock);
- if (--obj->bind_count == 0)
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
+ atomic_dec(&obj->bind_count);
/*
* And finally now the object is completely decoupled from this
@@ -784,10 +770,10 @@ err_unpin:
void i915_vma_close(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
/*
* We defer actually closing, unbinding and destroying the VMA until
@@ -801,17 +787,26 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+ spin_lock_irqsave(&i915->gt.closed_lock, flags);
+ list_add(&vma->closed_link, &i915->gt.closed_vma);
+ spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}
-void i915_vma_reopen(struct i915_vma *vma)
+static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_CLOSED) {
- vma->flags &= ~I915_VMA_CLOSED;
- list_del(&vma->closed_link);
- }
+ if (!i915_vma_is_closed(vma))
+ return;
+
+ spin_lock_irq(&i915->gt.closed_lock);
+ list_del_init(&vma->closed_link);
+ spin_unlock_irq(&i915->gt.closed_lock);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+ __i915_vma_remove_closed(vma);
}
static void __i915_vma_destroy(struct i915_vma *vma)
@@ -843,13 +838,13 @@ void i915_vma_destroy(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(i915_vma_is_pinned(vma));
- if (i915_vma_is_closed(vma))
- list_del(&vma->closed_link);
+ __i915_vma_remove_closed(vma);
WARN_ON(i915_vma_unbind(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
__i915_vma_destroy(vma);
}
@@ -857,12 +852,16 @@ void i915_vma_parked(struct drm_i915_private *i915)
{
struct i915_vma *vma, *next;
+ spin_lock_irq(&i915->gt.closed_lock);
list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- GEM_BUG_ON(!i915_vma_is_closed(vma));
+ list_del_init(&vma->closed_link);
+ spin_unlock_irq(&i915->gt.closed_lock);
+
i915_vma_destroy(vma);
- }
- GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
+ spin_lock_irq(&i915->gt.closed_lock);
+ }
+ spin_unlock_irq(&i915->gt.closed_lock);
}
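
i915_vma_parked above drops the list lock around each destruction because the destructor cannot run under a spinlock. A generic sketch of the same detach-unlock-destroy idea, here in a restart-from-head variant with hypothetical names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head link;
    };

    void destroy_item(struct item *it);     /* hypothetical, may sleep */

    static void reap_all(spinlock_t *lock, struct list_head *head)
    {
            spin_lock_irq(lock);
            while (!list_empty(head)) {
                    struct item *it =
                            list_first_entry(head, struct item, link);

                    list_del_init(&it->link);  /* empty link == unlisted */
                    spin_unlock_irq(lock);

                    destroy_item(it);          /* runs without the lock */

                    spin_lock_irq(lock);
            }
            spin_unlock_irq(lock);
    }
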
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -911,12 +910,10 @@ static void export_fence(struct i915_vma *vma,
* handle an error right now. Worst case should be missed
* synchronisation leading to rendering corruption.
*/
- reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv, 1) == 0)
reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
}
int i915_vma_move_to_active(struct i915_vma *vma,
@@ -925,7 +922,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
{
struct drm_i915_gem_object *obj = vma->obj;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ assert_vma_held(vma);
+ assert_object_held(obj);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/*
@@ -936,12 +934,12 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!vma->active.count)
- obj->active_count++;
+ if (!vma->active.count && !obj->active_count++)
+ i915_gem_object_get(obj); /* once more for the active ref */
if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count)
- obj->active_count--;
+ if (!vma->active.count && !--obj->active_count)
+ i915_gem_object_put(obj);
return -ENOMEM;
}
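
The hunk above replaces the explicit active-reference bookkeeping with plain first/last transition tests: the post-increment and pre-decrement forms catch exactly the 0 to 1 and 1 to 0 transitions. Reduced to its core, with serialisation of the counter assumed from the caller as in the hunk:

    static void first_active(struct drm_i915_gem_object *obj)
    {
            if (!obj->active_count++)       /* 0 -> 1: first user pins */
                    i915_gem_object_get(obj);
    }

    static void last_retired(struct drm_i915_gem_object *obj)
    {
            if (!--obj->active_count)       /* 1 -> 0: last user unpins */
                    i915_gem_object_put(obj);
    }
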
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 6eab70953a57..4b769db649bf 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -32,7 +32,7 @@
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
#include "i915_active.h"
#include "i915_request.h"
@@ -40,6 +40,8 @@
enum i915_cache_level;
/**
+ * DOC: Virtual Memory Address
+ *
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
@@ -52,7 +54,7 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
@@ -69,7 +71,7 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
- unsigned int open_count;
+ atomic_t open_count;
unsigned long flags;
/**
* How many users have pinned this object in GTT space.
@@ -104,10 +106,9 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(11)
#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_CLOSED BIT(13)
-#define I915_VMA_USERFAULT_BIT 14
+#define I915_VMA_USERFAULT_BIT 13
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(15)
+#define I915_VMA_GGTT_WRITE BIT(14)
struct i915_active active;
struct i915_active_request last_fence;
@@ -190,11 +191,6 @@ static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
return vma->flags & I915_VMA_CAN_FENCE;
}
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
@@ -211,6 +207,11 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return !list_empty(&vma->closed_link);
+}
+
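
With the I915_VMA_CLOSED bit gone, closed-ness now lives in the list node itself: INIT_LIST_HEAD() at creation and list_del_init() on removal keep the node self-pointing whenever the VMA is off the closed list, so list_empty() serves as the predicate and a flags bit is freed up. A sketch of the idiom, with hypothetical names:

    #include <linux/list.h>
    #include <linux/types.h>

    struct tracked {
            struct list_head link;
    };

    static void tracked_init(struct tracked *t)
    {
            INIT_LIST_HEAD(&t->link);       /* self-pointing: unlisted */
    }

    static bool tracked_is_listed(const struct tracked *t)
    {
            return !list_empty(&t->link);
    }

    static void tracked_unlist(struct tracked *t)
    {
            list_del_init(&t->link);        /* back to self-pointing */
    }
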
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -277,8 +278,11 @@ i915_vma_compare(struct i915_vma *vma,
*/
BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
+ BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+ offsetof(typeof(*view), remapped));
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
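
The BUILD_BUG_ONs above guard a size trick: the I915_GGTT_VIEW_* values are defined as the sizes of their payload structs (NORMAL is 0), and the union members must alias at one offset, so a single memcmp of view->type bytes from &partial compares exactly the payload in use. A reduced standalone sketch of the invariant, with hypothetical types:

    #include <linux/string.h>
    #include <linux/types.h>

    struct partial_info { u64 offset; unsigned int pages; };
    struct rotated_info { u32 plane[2][4]; };       /* bigger payload */

    enum view_type {
            VIEW_NORMAL = 0,        /* no payload bytes to compare */
            VIEW_PARTIAL = sizeof(struct partial_info),
            VIEW_ROTATED = sizeof(struct rotated_info),
    };

    struct view {
            enum view_type type;
            union {
                    struct partial_info partial;
                    struct rotated_info rotated;
            };
    };

    static bool view_eq(const struct view *a, const struct view *b)
    {
            return a->type == b->type &&
                   !memcmp(&a->partial, &b->partial, a->type);
    }
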
@@ -295,6 +299,18 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
+#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+
+static inline void i915_vma_lock(struct i915_vma *vma)
+{
+ reservation_object_lock(vma->resv, NULL);
+}
+
+static inline void i915_vma_unlock(struct i915_vma *vma)
+{
+ reservation_object_unlock(vma->resv);
+}
+
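
The wrappers above pair with assert_vma_held(), which i915_vma_move_to_active() now checks instead of struct_mutex. A hypothetical caller building a request rq would bracket the call like this; the exact flags and unwind are elided:

    static int mark_write(struct i915_vma *vma, struct i915_request *rq)
    {
            int err;

            i915_vma_lock(vma);
            err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
            i915_vma_unlock(vma);

            return err;
    }
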
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
static inline int __must_check
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
deleted file mode 100644
index 924cc556223a..000000000000
--- a/drivers/gpu/drm/i915/intel_context.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_context.h"
-#include "i915_globals.h"
-#include "intel_context.h"
-#include "intel_ringbuffer.h"
-
-static struct i915_global_context {
- struct i915_global base;
- struct kmem_cache *slab_ce;
-} global;
-
-struct intel_context *intel_context_alloc(void)
-{
- return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
-}
-
-void intel_context_free(struct intel_context *ce)
-{
- kmem_cache_free(global.slab_ce, ce);
-}
-
-struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce = NULL;
- struct rb_node *p;
-
- spin_lock(&ctx->hw_contexts_lock);
- p = ctx->hw_contexts.rb_node;
- while (p) {
- struct intel_context *this =
- rb_entry(p, struct intel_context, node);
-
- if (this->engine == engine) {
- GEM_BUG_ON(this->gem_context != ctx);
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
-{
- struct rb_node **p, *parent;
- int err = 0;
-
- spin_lock(&ctx->hw_contexts_lock);
-
- parent = NULL;
- p = &ctx->hw_contexts.rb_node;
- while (*p) {
- struct intel_context *this;
-
- parent = *p;
- this = rb_entry(parent, struct intel_context, node);
-
- if (this->engine == engine) {
- err = -EEXIST;
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- if (!err) {
- rb_link_node(&ce->node, parent, p);
- rb_insert_color(&ce->node, &ctx->hw_contexts);
- }
-
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-void __intel_context_remove(struct intel_context *ce)
-{
- struct i915_gem_context *ctx = ce->gem_context;
-
- spin_lock(&ctx->hw_contexts_lock);
- rb_erase(&ce->node, &ctx->hw_contexts);
- spin_unlock(&ctx->hw_contexts_lock);
-}
-
-static struct intel_context *
-intel_context_instance(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce, *pos;
-
- ce = intel_context_lookup(ctx, engine);
- if (likely(ce))
- return ce;
-
- ce = intel_context_alloc();
- if (!ce)
- return ERR_PTR(-ENOMEM);
-
- intel_context_init(ce, ctx, engine);
-
- pos = __intel_context_insert(ctx, engine, ce);
- if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
- intel_context_free(ce);
-
- GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
- return pos;
-}
-
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
- __acquires(ce->pin_mutex)
-{
- struct intel_context *ce;
-
- ce = intel_context_instance(ctx, engine);
- if (IS_ERR(ce))
- return ce;
-
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return ERR_PTR(-EINTR);
-
- return ce;
-}
-
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce;
- int err;
-
- ce = intel_context_instance(ctx, engine);
- if (IS_ERR(ce))
- return ce;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return ce;
-
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return ERR_PTR(-EINTR);
-
- if (likely(!atomic_read(&ce->pin_count))) {
- err = ce->ops->pin(ce);
- if (err)
- goto err;
-
- i915_gem_context_get(ctx);
- GEM_BUG_ON(ce->gem_context != ctx);
-
- mutex_lock(&ctx->mutex);
- list_add(&ce->active_link, &ctx->active_engines);
- mutex_unlock(&ctx->mutex);
-
- intel_context_get(ce);
- smp_mb__before_atomic(); /* flush pin before it is visible */
- }
-
- atomic_inc(&ce->pin_count);
- GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
-
- mutex_unlock(&ce->pin_mutex);
- return ce;
-
-err:
- mutex_unlock(&ce->pin_mutex);
- return ERR_PTR(err);
-}
-
-void intel_context_unpin(struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- ce->ops->unpin(ce);
-
- mutex_lock(&ce->gem_context->mutex);
- list_del(&ce->active_link);
- mutex_unlock(&ce->gem_context->mutex);
-
- i915_gem_context_put(ce->gem_context);
- intel_context_put(ce);
- }
-
- mutex_unlock(&ce->pin_mutex);
- intel_context_put(ce);
-}
-
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- kref_init(&ce->ref);
-
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
- ce->saturated = 0;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- mutex_init(&ce->pin_mutex);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
-static void i915_global_context_shrink(void)
-{
- kmem_cache_shrink(global.slab_ce);
-}
-
-static void i915_global_context_exit(void)
-{
- kmem_cache_destroy(global.slab_ce);
-}
-
-static struct i915_global_context global = { {
- .shrink = i915_global_context_shrink,
- .exit = i915_global_context_exit,
-} };
-
-int __init i915_global_context_init(void)
-{
- global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
- if (!global.slab_ce)
- return -ENOMEM;
-
- i915_global_register(&global.base);
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
deleted file mode 100644
index ebc861b1a49e..000000000000
--- a/drivers/gpu/drm/i915/intel_context.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CONTEXT_H__
-#define __INTEL_CONTEXT_H__
-
-#include <linux/lockdep.h>
-
-#include "intel_context_types.h"
-#include "intel_engine_types.h"
-
-struct intel_context *intel_context_alloc(void);
-void intel_context_free(struct intel_context *ce);
-
-void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-/**
- * intel_context_lookup - Find the matching HW context for this (ctx, engine)
- * @ctx - the parent GEM context
- * @engine - the target HW engine
- *
- * May return NULL if the HW context hasn't been instantiated (i.e. unused).
- */
-struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-/**
- * intel_context_pin_lock - Stablises the 'pinned' status of the HW context
- * @ctx - the parent GEM context
- * @engine - the target HW engine
- *
- * Acquire a lock on the pinned status of the HW context, such that the context
- * can neither be bound to the GPU or unbound whilst the lock is held, i.e.
- * intel_context_is_pinned() remains stable.
- */
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-static inline bool
-intel_context_is_pinned(struct intel_context *ce)
-{
- return atomic_read(&ce->pin_count);
-}
-
-static inline void intel_context_pin_unlock(struct intel_context *ce)
-__releases(ce->pin_mutex)
-{
- mutex_unlock(&ce->pin_mutex);
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce);
-void
-__intel_context_remove(struct intel_context *ce);
-
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- atomic_inc(&ce->pin_count);
-}
-
-void intel_context_unpin(struct intel_context *ce);
-
-static inline struct intel_context *intel_context_get(struct intel_context *ce)
-{
- kref_get(&ce->ref);
- return ce;
-}
-
-static inline void intel_context_put(struct intel_context *ce)
-{
- kref_put(&ce->ref, ce->ops->destroy);
-}
-
-#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 96618af47088..6ef74531588a 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -70,6 +70,10 @@ MODULE_FIRMWARE(SKL_CSR_PATH);
MODULE_FIRMWARE(BXT_CSR_PATH);
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define PACKAGE_MAX_FW_INFO_ENTRIES 20
+#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
+#define DMC_V1_MAX_MMIO_COUNT 8
+#define DMC_V3_MAX_MMIO_COUNT 20
struct intel_css_header {
/* 0x09 for DMC */
@@ -116,7 +120,10 @@ struct intel_css_header {
} __packed;
struct intel_fw_info {
- u16 reserved1;
+ u8 reserved1;
+
+ /* reserved on package_header version 1, must be 0 on version 2 */
+ u8 dmc_id;
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
@@ -130,28 +137,26 @@ struct intel_fw_info {
struct intel_package_header {
/* DMC container header length in dwords */
- unsigned char header_len;
+ u8 header_len;
- /* always value would be 0x01 */
- unsigned char header_ver;
+ /* 0x01, 0x02 */
+ u8 header_ver;
- unsigned char reserved[10];
+ u8 reserved[10];
/* Number of valid entries in the FWInfo array below */
u32 num_entries;
-
- struct intel_fw_info fw_info[20];
} __packed;
-struct intel_dmc_header {
+struct intel_dmc_header_base {
/* always value would be 0x40403E3E */
u32 signature;
/* DMC binary header length */
- unsigned char header_len;
+ u8 header_len;
/* 0x01 */
- unsigned char header_ver;
+ u8 header_ver;
/* Reserved */
u16 dmcc_ver;
@@ -164,22 +169,47 @@ struct intel_dmc_header {
/* Major Minor version */
u32 fw_version;
+} __packed;
+
+struct intel_dmc_header_v1 {
+ struct intel_dmc_header_base base;
/* Number of valid MMIO cycles present. */
u32 mmio_count;
/* MMIO address */
- u32 mmioaddr[8];
+ u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];
/* MMIO data */
- u32 mmiodata[8];
+ u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];
/* FW filename */
- unsigned char dfile[32];
+ char dfile[32];
u32 reserved1[2];
} __packed;
+struct intel_dmc_header_v3 {
+ struct intel_dmc_header_base base;
+
+ /* DMC RAM start MMIO address */
+ u32 start_mmioaddr;
+
+ u32 reserved[9];
+
+ /* FW filename */
+ char dfile[32];
+
+ /* Number of valid MMIO cycles present. */
+ u32 mmio_count;
+
+ /* MMIO address */
+ u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];
+
+ /* MMIO data */
+ u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
+} __packed;
+
struct stepping_info {
char stepping;
char substepping;
@@ -273,7 +303,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
}
fw_size = dev_priv->csr.dmc_fw_size;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
@@ -292,142 +322,282 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
-static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+/*
+ * Search fw_info table for dmc_offset to find firmware binary: num_entries is
+ * already sanitized.
+ */
+static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
+ unsigned int num_entries,
+ const struct stepping_info *si,
+ u8 package_ver)
{
- struct intel_css_header *css_header;
- struct intel_package_header *package_header;
- struct intel_dmc_header *dmc_header;
- struct intel_csr *csr = &dev_priv->csr;
- const struct stepping_info *si = intel_get_stepping_info(dev_priv);
- u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- u32 i;
- u32 *dmc_payload;
- size_t fsize;
+ u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
+ unsigned int i;
- if (!fw)
- return NULL;
+ for (i = 0; i < num_entries; i++) {
+ if (package_ver > 1 && fw_info[i].dmc_id != 0)
+ continue;
- fsize = sizeof(struct intel_css_header) +
- sizeof(struct intel_package_header) +
- sizeof(struct intel_dmc_header);
- if (fsize > fw->size)
- goto error_truncated;
+ if (fw_info[i].substepping == '*' &&
+ si->stepping == fw_info[i].stepping) {
+ dmc_offset = fw_info[i].offset;
+ break;
+ }
- /* Extract CSS Header information*/
- css_header = (struct intel_css_header *)fw->data;
- if (sizeof(struct intel_css_header) !=
- (css_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong CSS header length "
- "(%u bytes)\n",
- (css_header->header_len * 4));
- return NULL;
- }
+ if (si->stepping == fw_info[i].stepping &&
+ si->substepping == fw_info[i].substepping) {
+ dmc_offset = fw_info[i].offset;
+ break;
+ }
- if (csr->required_version &&
- css_header->version != csr->required_version) {
- DRM_INFO("Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u\n",
- CSR_VERSION_MAJOR(css_header->version),
- CSR_VERSION_MINOR(css_header->version),
- CSR_VERSION_MAJOR(csr->required_version),
- CSR_VERSION_MINOR(csr->required_version));
- return NULL;
+ if (fw_info[i].stepping == '*' &&
+ fw_info[i].substepping == '*') {
+ /*
+ * In theory we should stop the search as generic
+ * entries should always come after the more specific
+			 * ones, but let's continue so that we keep working
+			 * even with "broken" firmware. If we don't find a
+			 * more specific one, then we use this entry.
+ */
+ dmc_offset = fw_info[i].offset;
+ }
}
- csr->version = css_header->version;
+ return dmc_offset;
+}
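
A worked example of the precedence implemented above, with hypothetical package contents:

    /*
     * Stepping info = { 'B', '0' }
     *
     *   fw_info[0] = { .stepping = '*', .substepping = '*' }  -> remembered
     *   fw_info[1] = { .stepping = 'B', .substepping = '*' }  -> match, break
     *   fw_info[2] = { .stepping = 'B', .substepping = '0' }  -> never reached
     *
     * Entry 1 wins: a stepping match with wildcard substepping terminates
     * the scan before the later exact match is seen. Only a fully
     * wildcarded entry is remembered without ending the search.
     */
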
- readcount += sizeof(struct intel_css_header);
+static u32 parse_csr_fw_dmc(struct intel_csr *csr,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size)
+{
+ unsigned int header_len_bytes, dmc_header_size, payload_size, i;
+ const u32 *mmioaddr, *mmiodata;
+ u32 mmio_count, mmio_count_max;
+ u8 *payload;
- /* Extract Package Header information*/
- package_header = (struct intel_package_header *)
- &fw->data[readcount];
- if (sizeof(struct intel_package_header) !=
- (package_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong package header length "
- "(%u bytes)\n",
- (package_header->header_len * 4));
- return NULL;
- }
- readcount += sizeof(struct intel_package_header);
+ BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
- /* Search for dmc_offset to find firware binary. */
- for (i = 0; i < package_header->num_entries; i++) {
- if (package_header->fw_info[i].substepping == '*' &&
- si->stepping == package_header->fw_info[i].stepping) {
- dmc_offset = package_header->fw_info[i].offset;
- break;
- } else if (si->stepping == package_header->fw_info[i].stepping &&
- si->substepping == package_header->fw_info[i].substepping) {
- dmc_offset = package_header->fw_info[i].offset;
- break;
- } else if (package_header->fw_info[i].stepping == '*' &&
- package_header->fw_info[i].substepping == '*')
- dmc_offset = package_header->fw_info[i].offset;
- }
- if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("DMC firmware not supported for %c stepping\n",
- si->stepping);
- return NULL;
- }
- /* Convert dmc_offset into number of bytes. By default it is in dwords*/
- dmc_offset *= 4;
- readcount += dmc_offset;
- fsize += dmc_offset;
- if (fsize > fw->size)
+ /*
+	 * Check if we can access the common fields; we will check again
+	 * below after we have read the version
+ */
+ if (rem_size < sizeof(struct intel_dmc_header_base))
goto error_truncated;
- /* Extract dmc_header information. */
- dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
- if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
+ /* Cope with small differences between v1 and v3 */
+ if (dmc_header->header_ver == 3) {
+ const struct intel_dmc_header_v3 *v3 =
+ (const struct intel_dmc_header_v3 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v3))
+ goto error_truncated;
+
+ mmioaddr = v3->mmioaddr;
+ mmiodata = v3->mmiodata;
+ mmio_count = v3->mmio_count;
+ mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
+ /* header_len is in dwords */
+ header_len_bytes = dmc_header->header_len * 4;
+ dmc_header_size = sizeof(*v3);
+ } else if (dmc_header->header_ver == 1) {
+ const struct intel_dmc_header_v1 *v1 =
+ (const struct intel_dmc_header_v1 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v1))
+ goto error_truncated;
+
+ mmioaddr = v1->mmioaddr;
+ mmiodata = v1->mmiodata;
+ mmio_count = v1->mmio_count;
+ mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
+ header_len_bytes = dmc_header->header_len;
+ dmc_header_size = sizeof(*v1);
+ } else {
+ DRM_ERROR("Unknown DMC fw header version: %u\n",
+ dmc_header->header_ver);
+ return 0;
+ }
+
+ if (header_len_bytes != dmc_header_size) {
DRM_ERROR("DMC firmware has wrong dmc header length "
- "(%u bytes)\n",
- (dmc_header->header_len));
- return NULL;
+ "(%u bytes)\n", header_len_bytes);
+ return 0;
}
- readcount += sizeof(struct intel_dmc_header);
/* Cache the dmc header info. */
- if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
- DRM_ERROR("DMC firmware has wrong mmio count %u\n",
- dmc_header->mmio_count);
- return NULL;
+ if (mmio_count > mmio_count_max) {
+ DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
+ return 0;
}
- csr->mmio_count = dmc_header->mmio_count;
- for (i = 0; i < dmc_header->mmio_count; i++) {
- if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
- dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
+ mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
- dmc_header->mmioaddr[i]);
- return NULL;
+ mmioaddr[i]);
+ return 0;
}
- csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
- csr->mmiodata[i] = dmc_header->mmiodata[i];
+ csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ csr->mmiodata[i] = mmiodata[i];
}
+ csr->mmio_count = mmio_count;
+
+ rem_size -= header_len_bytes;
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
- nbytes = dmc_header->fw_size * 4;
- fsize += nbytes;
- if (fsize > fw->size)
+ payload_size = dmc_header->fw_size * 4;
+ if (rem_size < payload_size)
goto error_truncated;
- if (nbytes > csr->max_fw_size) {
- DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
- return NULL;
+ if (payload_size > csr->max_fw_size) {
+ DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
+ return 0;
}
csr->dmc_fw_size = dmc_header->fw_size;
- dmc_payload = kmalloc(nbytes, GFP_KERNEL);
- if (!dmc_payload) {
+ csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!csr->dmc_payload) {
DRM_ERROR("Memory allocation failed for dmc payload\n");
- return NULL;
+ return 0;
}
- return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+ payload = (u8 *)(dmc_header) + header_len_bytes;
+ memcpy(csr->dmc_payload, payload, payload_size);
+
+ return header_len_bytes + payload_size;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, rejecting.\n");
- return NULL;
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+static u32
+parse_csr_fw_package(struct intel_csr *csr,
+ const struct intel_package_header *package_header,
+ const struct stepping_info *si,
+ size_t rem_size)
+{
+ u32 package_size = sizeof(struct intel_package_header);
+ u32 num_entries, max_entries, dmc_offset;
+ const struct intel_fw_info *fw_info;
+
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_ver == 1) {
+ max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
+ } else if (package_header->header_ver == 2) {
+ max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
+ } else {
+ DRM_ERROR("DMC firmware has unknown header version %u\n",
+ package_header->header_ver);
+ return 0;
+ }
+
+ /*
+ * We should always have space for max_entries,
+ * even if not all are used
+ */
+ package_size += max_entries * sizeof(struct intel_fw_info);
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_len * 4 != package_size) {
+ DRM_ERROR("DMC firmware has wrong package header length "
+ "(%u bytes)\n", package_size);
+ return 0;
+ }
+
+ num_entries = package_header->num_entries;
+ if (WARN_ON(package_header->num_entries > max_entries))
+ num_entries = max_entries;
+
+ fw_info = (const struct intel_fw_info *)
+ ((u8 *)package_header + sizeof(*package_header));
+ dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
+ package_header->header_ver);
+ if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
+ DRM_ERROR("DMC firmware not supported for %c stepping\n",
+ si->stepping);
+ return 0;
+ }
+
+ /* dmc_offset is in dwords */
+ return package_size + dmc_offset * 4;
+
+error_truncated:
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+/* Return number of bytes parsed or 0 on error */
+static u32 parse_csr_fw_css(struct intel_csr *csr,
+ struct intel_css_header *css_header,
+ size_t rem_size)
+{
+ if (rem_size < sizeof(struct intel_css_header)) {
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+ }
+
+ if (sizeof(struct intel_css_header) !=
+ (css_header->header_len * 4)) {
+ DRM_ERROR("DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
+ (css_header->header_len * 4));
+ return 0;
+ }
+
+ if (csr->required_version &&
+ css_header->version != csr->required_version) {
+ DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ " please use v%u.%u\n",
+ CSR_VERSION_MAJOR(css_header->version),
+ CSR_VERSION_MINOR(css_header->version),
+ CSR_VERSION_MAJOR(csr->required_version),
+ CSR_VERSION_MINOR(csr->required_version));
+ return 0;
+ }
+
+ csr->version = css_header->version;
+
+ return sizeof(struct intel_css_header);
+}
+
+static void parse_csr_fw(struct drm_i915_private *dev_priv,
+ const struct firmware *fw)
+{
+ struct intel_css_header *css_header;
+ struct intel_package_header *package_header;
+ struct intel_dmc_header_base *dmc_header;
+ struct intel_csr *csr = &dev_priv->csr;
+ const struct stepping_info *si = intel_get_stepping_info(dev_priv);
+ u32 readcount = 0;
+ u32 r;
+
+ if (!fw)
+ return;
+
+ /* Extract CSS Header information */
+ css_header = (struct intel_css_header *)fw->data;
+ r = parse_csr_fw_css(csr, css_header, fw->size);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ /* Extract Package Header information */
+ package_header = (struct intel_package_header *)&fw->data[readcount];
+ r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ /* Extract dmc_header information */
+ dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
+ parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
}
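
The rewrite above settles on one convention for all three stages: each parse_csr_fw_* helper returns the bytes it consumed, or 0 after printing its own diagnostic, so the caller walks the blob with a running readcount and stops quietly at the first failure. A generic sketch of the convention, with hypothetical stage names:

    #include <linux/types.h>

    u32 parse_stage_a(const u8 *data, size_t rem);  /* hypothetical */
    u32 parse_stage_b(const u8 *data, size_t rem);  /* hypothetical */

    /* Returns total bytes consumed, 0 on the first stage failure. */
    static u32 parse_blob(const u8 *data, size_t size)
    {
            u32 offset = 0, r;

            r = parse_stage_a(data, size);
            if (!r)
                    return 0;       /* the stage logged its own error */
            offset += r;

            r = parse_stage_b(data + offset, size - offset);
            if (!r)
                    return 0;

            return offset + r;
    }
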
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -455,8 +625,7 @@ static void csr_load_work_fn(struct work_struct *work)
csr = &dev_priv->csr;
request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
- if (fw)
- dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
+ parse_csr_fw(dev_priv, fw);
if (dev_priv->csr.dmc_payload) {
intel_csr_load_program(dev_priv);
@@ -547,8 +716,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
- WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
-
return;
}
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/intel_csr.h
index 17a32c1e8a35..03c64f8af7ab 100644
--- a/drivers/gpu/drm/i915/intel_csr.h
+++ b/drivers/gpu/drm/i915/intel_csr.h
@@ -8,6 +8,10 @@
struct drm_i915_private;
+#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
+#define CSR_VERSION_MAJOR(version) ((version) >> 16)
+#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
+
void intel_csr_ucode_init(struct drm_i915_private *i915);
void intel_csr_load_program(struct drm_i915_private *i915);
void intel_csr_ucode_fini(struct drm_i915_private *i915);
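
The macros pack the major version into the high 16 bits and the minor into the low 16; a worked example:

    /*
     * CSR_VERSION(1, 7)             == 0x00010007
     * CSR_VERSION_MAJOR(0x00010007) == 1
     * CSR_VERSION_MINOR(0x00010007) == 7
     */
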
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6af480b95bc6..7135d8dc32a7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -90,10 +90,10 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "slice total: %u, mask=%04x\n",
hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
- s, hweight8(sseu->subslice_mask[s]),
+ s, intel_sseu_subslices_per_slice(sseu, s),
sseu->subslice_mask[s]);
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
@@ -114,6 +114,40 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
info->cs_timestamp_frequency_khz);
}
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ return slice * slice_stride + subslice * subslice_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
+ (i * BITS_PER_BYTE);
+ }
+
+ return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+}
+
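
The relocated helpers above address a flat byte array in which each subslice owns GEN_SSEU_STRIDE(max_eus_per_subslice) bytes, with the 16-bit EU mask stored little-endian across them. A standalone sketch of the packing, with hypothetical names:

    #include <linux/types.h>

    /* 'stride' bytes per slot, mask stored little-endian. */
    static void set_mask(u8 *store, int offset, int stride, u16 mask)
    {
            int i;

            for (i = 0; i < stride; i++)
                    store[offset + i] = (mask >> (8 * i)) & 0xff;
    }

    static u16 get_mask(const u8 *store, int offset, int stride)
    {
            u16 mask = 0;
            int i;

            for (i = 0; i < stride; i++)
                    mask |= (u16)store[offset + i] << (8 * i);

            return mask;
    }
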
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
@@ -126,7 +160,7 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
- s, hweight8(sseu->subslice_mask[s]),
+ s, intel_sseu_subslices_per_slice(sseu, s),
sseu->subslice_mask[s]);
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -260,9 +294,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
* EU in any one subslice may be fused off for die
* recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/* No restrictions on Power Gating */
sseu->has_slice_pg = 1;
@@ -310,8 +345,9 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* CHV expected to always have a uniform distribution of EU
* across subslices.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
- sseu->eu_total / sseu_subslice_total(sseu) :
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
0;
/*
* CHV supports subslice power gating on devices with more than
@@ -319,7 +355,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* more than one EU pair per subslice.
*/
sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
@@ -393,9 +429,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* recovery. BXT is expected to be perfectly uniform in EU
* distribution.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* SKL+ supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
@@ -407,7 +444,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_slice_pg =
!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
- IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
+ IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(dev_priv)) {
@@ -496,9 +533,10 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* subslices with the exception that any one EU in any one subslice may
* be fused off for die recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* BDW supports slice power gating on devices with more than
@@ -735,7 +773,7 @@ static const u16 subplatform_ult_ids[] = {
INTEL_CFL_U_GT3_IDS(0),
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
- INTEL_WHL_U_GT3_IDS(0)
+ INTEL_WHL_U_GT3_IDS(0),
};
static const u16 subplatform_ulx_ids[] = {
@@ -748,17 +786,14 @@ static const u16 subplatform_ulx_ids[] = {
INTEL_SKL_ULX_GT1_IDS(0),
INTEL_SKL_ULX_GT2_IDS(0),
INTEL_KBL_ULX_GT1_IDS(0),
- INTEL_KBL_ULX_GT2_IDS(0)
-};
-
-static const u16 subplatform_aml_ids[] = {
+ INTEL_KBL_ULX_GT2_IDS(0),
INTEL_AML_KBL_GT2_IDS(0),
- INTEL_AML_CFL_GT2_IDS(0)
+ INTEL_AML_CFL_GT2_IDS(0),
};
static const u16 subplatform_portf_ids[] = {
INTEL_CNL_PORT_F_IDS(0),
- INTEL_ICL_PORT_F_IDS(0)
+ INTEL_ICL_PORT_F_IDS(0),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -794,9 +829,6 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
/* ULX machines are also considered ULT. */
mask |= BIT(INTEL_SUBPLATFORM_ULT);
}
- } else if (find_devid(devid, subplatform_aml_ids,
- ARRAY_SIZE(subplatform_aml_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_AML);
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
mask = BIT(INTEL_SUBPLATFORM_PORTF);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 0e579f158016..ddafc819bf30 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,8 +27,11 @@
#include <uapi/drm/i915_drm.h>
-#include "intel_engine_types.h"
-#include "intel_display.h"
+#include "display/intel_display.h"
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_context_types.h"
+#include "gt/intel_sseu.h"
struct drm_printer;
struct drm_i915_private;
@@ -88,7 +91,6 @@ enum intel_platform {
/* HSW/BDW/SKL/KBL/CFL */
#define INTEL_SUBPLATFORM_ULT (0)
#define INTEL_SUBPLATFORM_ULX (1)
-#define INTEL_SUBPLATFORM_AML (2)
/* CNL/ICL */
#define INTEL_SUBPLATFORM_PORTF (0)
@@ -102,14 +104,13 @@ enum intel_ppgtt_type {
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
- func(is_alpha_support); \
+ func(require_force_probe); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
- func(has_guc_ct); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -118,6 +119,7 @@ enum intel_ppgtt_type {
func(has_pooled_eu); \
func(has_rc6); \
func(has_rc6p); \
+ func(has_rps); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
@@ -139,33 +141,6 @@ enum intel_ppgtt_type {
func(overlay_needs_physical); \
func(supports_tv);
-#define GEN_MAX_SLICES (6) /* CNL upper bound */
-#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
- u16 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-
- /* Topology fields */
- u8 max_slices;
- u8 max_subslices;
- u8 max_eus_per_subslice;
-
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
-};
-
struct intel_device_info {
u16 gen_mask;
@@ -202,8 +177,8 @@ struct intel_device_info {
int cursor_offsets[I915_MAX_PIPES];
struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
+ u32 degamma_lut_size;
+ u32 gamma_lut_size;
u32 degamma_lut_tests;
u32 gamma_lut_tests;
} color;
@@ -241,53 +216,6 @@ struct intel_driver_caps {
bool has_logical_contexts:1;
};
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- unsigned int i, total = 0;
-
- for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
- total += hweight8(sseu->subslice_mask[i]);
-
- return total;
-}
-
-static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
- BITS_PER_BYTE);
- int slice_stride = sseu->max_subslices * subslice_stride;
-
- return slice * slice_stride + subslice * subslice_stride;
-}
-
-static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
-
- for (i = 0;
- i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
- eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
- (i * BITS_PER_BYTE);
- }
-
- return eu_mask;
-}
-
-static inline void sseu_set_eus(struct sseu_dev_info *sseu,
- int slice, int subslice, u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
- for (i = 0;
- i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
- }
-}
-
const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e85cd377a652..1d58f7ec5d84 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,7 +28,6 @@
#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/sched/clock.h>
-#include <linux/stackdepot.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
@@ -47,127 +46,10 @@
struct drm_printer;
-/**
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
- long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
- int ret__; \
- might_sleep(); \
- for (;;) { \
- const bool expired__ = ktime_after(ktime_get_raw(), end__); \
- OP; \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret__ = 0; \
- break; \
- } \
- if (expired__) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- usleep_range(wait__, wait__ * 2); \
- if (wait__ < (Wmax)) \
- wait__ <<= 1; \
- } \
- ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
- (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
- int cpu, ret, timeout = (US) * 1000; \
- u64 base; \
- _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- cpu = smp_processor_id(); \
- } \
- base = local_clock(); \
- for (;;) { \
- u64 now = local_clock(); \
- if (!(ATOMIC)) \
- preempt_enable(); \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret = 0; \
- break; \
- } \
- if (now - base >= timeout) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- if (unlikely(cpu != smp_processor_id())) { \
- timeout -= now - base; \
- cpu = smp_processor_id(); \
- base = local_clock(); \
- } \
- } \
- } \
- ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
- int ret__; \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10, 10); \
- else \
- ret__ = _wait_for_atomic((COND), (US), 0); \
- ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- BUILD_BUG_ON((US) > 50000); \
- _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
/*
* Display related stuff
*/
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
-/* maximum connectors per crtcs in the mode set */
-
-#define INTEL_I2C_BUS_DVO 1
-#define INTEL_I2C_BUS_SDVO 2
-
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
enum intel_output_type {
@@ -185,14 +67,6 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
-#define INTEL_DVO_CHIP_NONE 0
-#define INTEL_DVO_CHIP_LVDS 1
-#define INTEL_DVO_CHIP_TMDS 2
-#define INTEL_DVO_CHIP_TVOUT 4
-
-#define INTEL_DSI_VIDEO_MODE 0
-#define INTEL_DSI_COMMAND_MODE 1
-
struct intel_framebuffer {
struct drm_framebuffer base;
struct intel_rotation_info rot_info;
@@ -546,6 +420,8 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
+ intel_wakeref_t wakeref;
+
struct {
/*
* Logical state of cdclk (used for all scaling, watermark,
@@ -677,21 +553,6 @@ struct intel_initial_plane_config {
u8 rotation;
};
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
-
struct intel_scaler {
int in_use;
u32 mode;
@@ -1026,6 +887,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ u32 data_rate[I915_MAX_PLANES];
+
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
@@ -1051,6 +914,7 @@ struct intel_crtc_state {
union hdmi_infoframe avi;
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
+ union hdmi_infoframe drm;
} infoframes;
/* HDMI scrambling status */
@@ -1581,56 +1445,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
&crtc->base));
}
-/* intel_fifo_underrun.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum pipe pch_transcoder,
- bool enable);
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum pipe pch_transcoder);
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
-
-/* i915_irq.c */
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
- u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
-{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
- return dev_priv->runtime_pm.irqs_enabled;
-}
-
-int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-
/* intel_display.c */
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -1652,9 +1466,8 @@ unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-void intel_mark_busy(struct drm_i915_private *dev_priv);
-void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1722,18 +1535,6 @@ int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
-int intel_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- u64 *val);
-int intel_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- u64 val);
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -1763,18 +1564,12 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-unsigned int skl_cdclk_get_vco(unsigned int freq);
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
@@ -1816,230 +1611,6 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
-
-/* intel_dp_link_training.c */
-void intel_dp_start_link_train(struct intel_dp *intel_dp);
-void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-
-/* intel_vdsc.c */
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config);
-enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
-
-/* intel_dp_aux_backlight.c */
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
-
-/* intel_dp_mst.c */
-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* vlv_dsi.c */
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
-
-/* icl_dsi.c */
-void icl_dsi_init(struct drm_i915_private *dev_priv);
-
-/* intel_dsi_dcs_backlight.c */
-int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
-
-/* intel_hotplug.c */
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector);
-
-/* intel_overlay.c */
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
-int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
-
-/* intel_quirks.c */
-void intel_init_quirks(struct drm_i915_private *dev_priv);
-
-/* intel_runtime_pm.c */
-void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-
-enum i915_drm_suspend_mode {
- I915_DRM_SUSPEND_IDLE,
- I915_DRM_SUSPEND_MEM,
- I915_DRM_SUSPEND_HIBERNATE,
-};
-
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
- enum i915_drm_suspend_mode);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref);
-#else
-#define intel_display_power_put(i915, domain, wakeref) \
- intel_display_power_put_unchecked(i915, domain)
-#endif
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices);
-
-static inline void
-assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
-{
- WARN_ONCE(rpm->suspended,
- "Device suspended during HW access\n");
-}
-
-static inline void
-__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
-{
- assert_rpm_device_not_suspended(rpm);
- WARN_ONCE(!atomic_read(&rpm->wakeref_count),
- "RPM wakelock ref not held during HW access");
-}
-
-static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
-{
- __assert_rpm_wakelock_held(&i915->runtime_pm);
-}
-
-/**
- * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @i915: i915 device instance
- *
- * This function disables asserts that check if we hold an RPM wakelock
- * reference, while keeping the device-not-suspended checks still enabled.
- * It's meant to be used only in special circumstances where our rule about
- * the wakelock refcount wrt. the device power state doesn't hold. According
- * to this rule at any point where we access the HW or want to keep the HW in
- * an active state we must hold an RPM wakelock reference acquired via one of
- * the intel_runtime_pm_get() helpers. Currently there are a few special spots
- * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
- * forcewake release timer, and the GPU RPS and hangcheck works. All other
- * users should avoid using this function.
- *
- * Any calls to this function must have a symmetric call to
- * enable_rpm_wakeref_asserts().
- */
-static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
-{
- atomic_inc(&i915->runtime_pm.wakeref_count);
-}
-
-/**
- * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @i915: i915 device instance
- *
- * This function re-enables the RPM assert checks after disabling them with
- * disable_rpm_wakeref_asserts. It's meant to be used only in special
- * circumstances otherwise its use should be avoided.
- *
- * Any calls to this function must have a symmetric call to
- * disable_rpm_wakeref_asserts().
- */
-static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
-{
- atomic_dec(&i915->runtime_pm.wakeref_count);
-}
-
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-
-#define with_intel_runtime_pm(i915, wf) \
- for ((wf) = intel_runtime_pm_get(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
-
-#define with_intel_runtime_pm_if_in_use(i915, wf) \
- for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
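
The wakeref acquired by the helper travels through the loop variable, so the matching put runs on every exit from the block. A minimal usage sketch, with a hypothetical helper name not taken from this patch:

static u32 read_reg_awake(struct drm_i915_private *i915, i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_runtime_pm(i915, wakeref)
		val = intel_uncore_read(&i915->uncore, reg); /* HW is awake here */

	return val;
}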
-
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
-#else
-#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- struct drm_printer *p);
-#else
-static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- struct drm_printer *p)
-{
-}
-#endif
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override);
-
-/* intel_atomic.c */
-int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- u64 *val);
-int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- u64 val);
-int intel_digital_connector_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_state);
-struct drm_connector_state *
-intel_digital_connector_duplicate_state(struct drm_connector *connector);
-
-struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
-void intel_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
-void intel_atomic_state_clear(struct drm_atomic_state *);
-
-static inline struct intel_crtc_state *
-intel_atomic_get_crtc_state(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
- if (IS_ERR(crtc_state))
- return ERR_CAST(crtc_state);
-
- return to_intel_crtc_state(crtc_state);
-}
-
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 3aabfa2d9198..c40a6efdd33a 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -34,6 +34,13 @@ static void gen8_guc_raise_irq(struct intel_guc *guc)
I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
+static void gen11_guc_raise_irq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+}
+
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
GEM_BUG_ON(!guc->send_regs.base);
@@ -49,9 +56,15 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
enum forcewake_domains fw_domains = 0;
unsigned int i;
- guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
- BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ guc->send_regs.base =
+ i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
+ } else {
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
+ }
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
@@ -63,6 +76,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
@@ -71,7 +86,17 @@ void intel_guc_init_early(struct intel_guc *guc)
spin_lock_init(&guc->irq_lock);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
- guc->notify = gen8_guc_raise_irq;
+ if (INTEL_GEN(i915) >= 11) {
+ guc->notify = gen11_guc_raise_irq;
+ guc->interrupts.reset = gen11_reset_guc_interrupts;
+ guc->interrupts.enable = gen11_enable_guc_interrupts;
+ guc->interrupts.disable = gen11_disable_guc_interrupts;
+ } else {
+ guc->notify = gen8_guc_raise_irq;
+ guc->interrupts.reset = gen9_reset_guc_interrupts;
+ guc->interrupts.enable = gen9_enable_guc_interrupts;
+ guc->interrupts.disable = gen9_disable_guc_interrupts;
+ }
}
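
Once the notify hook and the interrupts vtable are selected here, callers no longer need per-generation checks. A sketch of a call site, assuming a thin wrapper that is not part of this hunk:

static inline void guc_raise_irq(struct intel_guc *guc)
{
	guc->notify(guc);	/* gen8 or gen11 flavour, chosen at init */
}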
static int guc_init_wq(struct intel_guc *guc)
@@ -154,7 +179,7 @@ int intel_guc_init_misc(struct intel_guc *guc)
void intel_guc_fini_misc(struct intel_guc *guc)
{
- intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
guc_fini_wq(guc);
}
@@ -189,9 +214,13 @@ int intel_guc_init(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
- ret = guc_shared_data_create(guc);
+ ret = intel_uc_fw_init(&guc->fw);
if (ret)
goto err_fetch;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ goto err_fw;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -203,11 +232,9 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
- if (HAS_GUC_CT(dev_priv)) {
- ret = intel_guc_ct_init(&guc->ct);
- if (ret)
- goto err_ads;
- }
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
@@ -220,8 +247,10 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
-err_fetch:
+err_fw:
intel_uc_fw_fini(&guc->fw);
+err_fetch:
+ intel_uc_fw_cleanup_fetch(&guc->fw);
return ret;
}
@@ -231,26 +260,19 @@ void intel_guc_fini(struct intel_guc *guc)
i915_ggtt_disable_guc(dev_priv);
- if (HAS_GUC_CT(dev_priv))
- intel_guc_ct_fini(&guc->ct);
+ intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
- u32 flags;
- u32 ads;
-
- ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
- flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
-
- if (!GUC_LOG_LEVEL_IS_ENABLED(level))
- flags |= GUC_LOG_DEFAULT_DISABLED;
+ u32 flags = 0;
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
@@ -265,11 +287,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- flags |= GUC_CTL_VCS2_ENABLED;
-
- if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
- flags |= GUC_CTL_KERNEL_SUBMISSIONS;
- else
+ if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -333,6 +351,14 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
return flags;
}
+static u32 guc_ctl_ads_flags(struct intel_guc *guc)
+{
+ u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+
+ return flags;
+}
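
For concreteness, a worked example of the encoding above, using an illustrative GGTT offset:

/*
 * ADS blob pinned at GGTT offset 0x10000, 4 KiB pages:
 *   ads   = 0x10000 >> PAGE_SHIFT          = 0x10
 *   flags = 0x10 << GUC_ADS_ADDR_SHIFT (1) = 0x20
 * i.e. GUC_CTL_ADS carries the page-frame number of the ADS blob.
 */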
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -346,20 +372,11 @@ void intel_guc_init_params(struct intel_guc *guc)
memset(params, 0, sizeof(params));
- /*
- * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
- * second. This ARAT is calculated by:
- * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
- */
- params[GUC_CTL_ARAT_HIGH] = 0;
- params[GUC_CTL_ARAT_LOW] = 100000000;
-
- params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
-
+ params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
- params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
- params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
+ params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
@@ -410,9 +427,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
/* If CT is available, we expect to use MMIO only during init/fini */
- GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
- *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
- *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+ GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+ *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
@@ -461,33 +477,6 @@ out:
return ret;
}
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 msg, val;
-
- /*
- * Sample the log buffer flush related bits & clear them out now
- * from the message identity register to minimize the probability
- * of losing a flush interrupt when there are back-to-back flush
- * interrupts.
- * There can be a new flush interrupt, for a different log buffer
- * type (like for ISR), whilst Host is handling one (for DPC).
- * Since the same bit is used in the message register for ISR & DPC,
- * it could happen that GuC sets the bit for the 2nd interrupt but
- * Host clears out the bit on handling the 1st interrupt.
- */
- disable_rpm_wakeref_asserts(dev_priv);
- spin_lock(&guc->irq_lock);
- val = I915_READ(SOFT_SCRATCH(15));
- msg = val & guc->msg_enabled_mask;
- I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
- spin_unlock(&guc->irq_lock);
- enable_rpm_wakeref_asserts(dev_priv);
-
- intel_guc_to_host_process_recv_msg(guc, &msg, 1);
-}
-
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len)
{
@@ -543,25 +532,33 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-/*
- * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
- * then return, so waiting on the H2G is not enough to guarantee GuC is done.
- * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
- * scratch register 14, so we can poll on that. Note that GuC does not ensure
- * that the value in the register is different from
- * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
- * take care of that ourselves as well.
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @guc: the guc
*/
-static int guc_sleep_state_action(struct intel_guc *guc,
- const u32 *action, u32 len)
+int intel_guc_suspend(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
u32 status;
+ u32 action[] = {
+ INTEL_GUC_ACTION_ENTER_S_STATE,
+ GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
+ };
+
+ /*
+ * The ENTER_S_STATE action queues the save/restore operation in GuC FW
+ * and then returns, so waiting on the H2G is not enough to guarantee
+ * GuC is done. When all the processing is done, GuC writes
+ * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
+ * on that. Note that GuC does not ensure that the value in the register
+ * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
+ * in progress so we need to take care of that ourselves as well.
+ */
I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
- ret = intel_guc_send(guc, action, len);
+ ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
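
The polling step described in the comment above is outside this hunk; a minimal sketch of what it amounts to, with an illustrative open-coded loop and timeout (the driver itself uses its register-wait helpers):

static int guc_wait_sleep_state(struct drm_i915_private *dev_priv)
{
	unsigned int tries = 100;
	u32 status;

	do {
		status = I915_READ(SOFT_SCRATCH(14));
		/* GuC overwrites the scratch with its status once done */
		if (!(status & INTEL_GUC_SLEEP_STATE_INVALID_MASK))
			return status == INTEL_GUC_SLEEP_STATE_SUCCESS ? 0 : -EIO;
		usleep_range(10, 20);
	} while (--tries);

	return -ETIMEDOUT;
}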
@@ -582,21 +579,6 @@ static int guc_sleep_state_action(struct intel_guc *guc,
}
/**
- * intel_guc_suspend() - notify GuC entering suspend state
- * @guc: the guc
- */
-int intel_guc_suspend(struct intel_guc *guc)
-{
- u32 data[] = {
- INTEL_GUC_ACTION_ENTER_S_STATE,
- GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
- intel_guc_ggtt_offset(guc, guc->shared_data)
- };
-
- return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
-}
-
-/**
* intel_guc_reset_engine() - ask GuC to reset an engine
* @guc: intel_guc structure
* @engine: engine to be reset
@@ -625,13 +607,12 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 data[] = {
+ u32 action[] = {
INTEL_GUC_ACTION_EXIT_S_STATE,
GUC_POWER_D0,
- intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
@@ -683,7 +664,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
u64 flags;
int ret;
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -704,20 +685,3 @@ err:
i915_gem_object_put(obj);
return vma;
}
-
-/**
- * intel_guc_reserved_gtt_size()
- * @guc: intel_guc structure
- *
- * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
- * GuC we can't have any objects pinned in that region. This function returns
- * the size of the shadowed region.
- *
- * Returns:
- * 0 if GuC is not present or not in use.
- * Otherwise, the GuC WOPCM size.
- */
-u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
-{
- return guc_to_i915(guc)->wopcm.guc.size;
-}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 2c59ff8d9f39..08c906abdfa2 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -55,9 +55,15 @@ struct intel_guc {
/* intel_guc_recv interrupt related state */
spinlock_t irq_lock;
- bool interrupts_enabled;
unsigned int msg_enabled_mask;
+ struct {
+ bool enabled;
+ void (*reset)(struct drm_i915_private *i915);
+ void (*enable)(struct drm_i915_private *i915);
+ void (*disable)(struct drm_i915_private *i915);
+ } interrupts;
+
struct i915_vma *ads_vma;
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
@@ -96,11 +102,6 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
-static inline bool intel_guc_is_alive(struct intel_guc *guc)
-{
- return intel_uc_fw_is_loaded(&guc->fw);
-}
-
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
@@ -164,7 +165,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -172,7 +172,11 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-u32 intel_guc_reserved_gtt_size(struct intel_guc *guc);
+
+static inline bool intel_guc_is_loaded(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_loaded(&guc->fw);
+}
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index bec62f34b15a..ecb69fc94218 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
- for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
policy = &policies->policy[p][i];
guc_policy_init(policy);
@@ -61,91 +61,142 @@ static void guc_policies_init(struct guc_policies *policies)
policies->is_valid = 1;
}
+static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
+{
+ memset(pool, 0, num * sizeof(*pool));
+}
+
/*
* The first 80 dwords of the register state context, containing the
* execlists and ppgtt registers.
*/
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-/**
- * intel_guc_ads_create() - creates GuC ADS
- * @guc: intel_guc struct
- *
- */
-int intel_guc_ads_create(struct intel_guc *guc)
+/* The ads obj includes the struct itself and buffers passed to GuC */
+struct __guc_ads_blob {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_mmio_reg_state reg_state;
+ struct guc_gt_system_info system_info;
+ struct guc_clients_info clients_info;
+ struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
+ u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+} __packed;
+
+static int __guc_ads_init(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct i915_vma *vma, *kernel_ctx_vma;
- struct page *page;
- /* The ads obj includes the struct itself and buffers passed to GuC */
- struct {
- struct guc_ads ads;
- struct guc_policies policies;
- struct guc_mmio_reg_state reg_state;
- u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
- } __packed *blob;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+ struct __guc_ads_blob *blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
+ u8 engine_class;
- GEM_BUG_ON(guc->ads_vma);
-
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- guc->ads_vma = vma;
-
- page = i915_vma_first_page(vma);
- blob = kmap(page);
+ blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
- /* MMIO reg state */
- for_each_engine(engine, dev_priv, id) {
- blob->reg_state.white_list[engine->guc_id].mmio_start =
- engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
-
- /* Nothing to be saved or restored for now. */
- blob->reg_state.white_list[engine->guc_id].count = 0;
- }
-
/*
- * The GuC requires a "Golden Context" when it reinitialises
- * engines after a reset. Here we use the Render ring default
- * context, which must already exist and be pinned in the GGTT,
- * so its address won't change after we've told the GuC where
- * to find it. Note that we have to skip our header (1 page),
- * because our GuC shared data is there.
+ * GuC expects a per-engine-class context image and size
+ * (minus hwsp and ring context). The context image will be
+ * used to reinitialize engines after a reset. It must exist
+ * and be pinned in the GGTT, so that the address won't change after
+ * we have told GuC where to find it. The context size will be used
+ * to validate that the LRC base + size fall within the allowed GGTT.
*/
- kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
- blob->ads.golden_context_lrca =
- intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
+ for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
+ if (engine_class == OTHER_CLASS)
+ continue;
+ /*
+ * TODO: Set context pointer to default state to allow
+ * GuC to re-init guilty contexts after internal reset.
+ */
+ blob->ads.golden_context_lrca[engine_class] = 0;
+ blob->ads.eng_state_size[engine_class] =
+ intel_engine_context_size(dev_priv, engine_class) -
+ skipped_size;
+ }
- /*
- * The GuC expects us to exclude the portion of the context image that
- * it skips from the size it is to read. It starts reading from after
- * the execlist context (so skipping the first page [PPHWSP] and 80
- * dwords). Weird guc is weird.
- */
- for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] =
- engine->context_size - skipped_size;
+ /* System info */
+ blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+ blob->system_info.rcs_enabled = 1;
+ blob->system_info.bcs_enabled = 1;
+
+ blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
+ blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
+ blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+
+ base = intel_guc_ggtt_offset(guc, guc->ads_vma);
- base = intel_guc_ggtt_offset(guc, vma);
+ /* Clients info */
+ guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
+
+ blob->clients_info.clients_num = 1;
+ blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
+ blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
+
+ /* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+ blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
+ blob->ads.clients_info = base + ptr_offset(blob, clients_info);
+
+ i915_gem_object_unpin_map(guc->ads_vma->obj);
- kunmap(page);
+ return 0;
+}
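
All of the firmware-visible addresses above are formed the same way: the blob's base GGTT offset plus the member's offset inside struct __guc_ads_blob. A minimal sketch, assuming ptr_offset() reduces to offsetof():

static u32 guc_ads_policies_addr(struct intel_guc *guc)
{
	return intel_guc_ggtt_offset(guc, guc->ads_vma) +
	       offsetof(struct __guc_ads_blob, policies);
}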
+
+/**
+ * intel_guc_ads_create() - allocates and initializes GuC ADS.
+ * @guc: intel_guc struct
+ *
+ * GuC needs a memory block (the Additional Data Struct) where it will
+ * store some data. Allocate and initialize such a memory block for
+ * GuC use.
+ */
+int intel_guc_ads_create(struct intel_guc *guc)
+{
+ const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
+ struct i915_vma *vma;
+ int ret;
+
+ GEM_BUG_ON(guc->ads_vma);
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
+
+ ret = __guc_ads_init(guc);
+ if (ret)
+ goto err_vma;
return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&guc->ads_vma, 0);
+ return ret;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma, 0);
}
+
+/**
+ * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
+ * @guc: intel_guc struct
+ *
+ * GuC stores some data in the ADS, which might be stale after a reset.
+ * Reinitialize the whole ADS in case any part of it was corrupted
+ * during the previous GuC run.
+ */
+void intel_guc_ads_reset(struct intel_guc *guc)
+{
+ if (!guc->ads_vma)
+ return;
+ __guc_ads_init(guc);
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h
index c4735742c564..7f40f9cd5fb9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/intel_guc_ads.h
@@ -29,5 +29,6 @@ struct intel_guc;
int intel_guc_ads_create(struct intel_guc *guc);
void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_reset(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index dde1dc0d6e69..3921809f812b 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -565,7 +565,7 @@ static inline unsigned int ct_header_get_action(u32 header)
static inline bool ct_header_is_response(u32 header)
{
- return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
+ return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
@@ -848,8 +848,6 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
* Allocate memory required for communication via
* the CT channel.
*
- * Shall only be called for platforms with HAS_GUC_CT.
- *
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_init(struct intel_guc_ct *ct)
@@ -875,8 +873,6 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
*
* Deallocate memory required for communication via
* the CT channel.
- *
- * Shall only be called for platforms with HAS_GUC_CT.
*/
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
@@ -890,19 +886,14 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
- * Shall only be called for platforms with HAS_GUC_CT.
- *
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- struct drm_i915_private *i915 = guc_to_i915(guc);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
int err;
- GEM_BUG_ON(!HAS_GUC_CT(i915));
-
if (ctch->enabled)
return 0;
@@ -920,17 +911,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
/**
* intel_guc_ct_disable - Disable buffer based command transport.
* @ct: pointer to CT struct
- *
- * Shall only be called for platforms with HAS_GUC_CT.
*/
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- struct drm_i915_private *i915 = guc_to_i915(guc);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
- GEM_BUG_ON(!HAS_GUC_CT(i915));
-
if (!ctch->enabled)
return;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index f5e7f0663304..41ba593a4df7 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -96,4 +96,9 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
+static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
+{
+ ct->host_channel.enabled = false;
+}
+
#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 792a551450c7..72cdafd9636a 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -30,53 +30,82 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-#define SKL_FW_MAJOR 9
-#define SKL_FW_MINOR 33
-
-#define BXT_FW_MAJOR 9
-#define BXT_FW_MINOR 29
-
-#define KBL_FW_MAJOR 9
-#define KBL_FW_MINOR 39
-
-#define GUC_FW_PATH(platform, major, minor) \
- "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
-
-#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
-MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
-
-#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
-MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
-
-#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
-MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+#define __MAKE_GUC_FW_PATH(KEY) \
+ "i915/" \
+ __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
+ __stringify(KEY##_GUC_FW_MAJOR) "." \
+ __stringify(KEY##_GUC_FW_MINOR) "." \
+ __stringify(KEY##_GUC_FW_PATCH) ".bin"
+
+#define SKL_GUC_FW_PREFIX skl
+#define SKL_GUC_FW_MAJOR 32
+#define SKL_GUC_FW_MINOR 0
+#define SKL_GUC_FW_PATCH 3
+#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
+MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
+
+#define BXT_GUC_FW_PREFIX bxt
+#define BXT_GUC_FW_MAJOR 32
+#define BXT_GUC_FW_MINOR 0
+#define BXT_GUC_FW_PATCH 3
+#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
+MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
+
+#define KBL_GUC_FW_PREFIX kbl
+#define KBL_GUC_FW_MAJOR 32
+#define KBL_GUC_FW_MINOR 0
+#define KBL_GUC_FW_PATCH 3
+#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
+MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
+
+#define GLK_GUC_FW_PREFIX glk
+#define GLK_GUC_FW_MAJOR 32
+#define GLK_GUC_FW_MINOR 0
+#define GLK_GUC_FW_PATCH 3
+#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
+MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
+
+#define ICL_GUC_FW_PREFIX icl
+#define ICL_GUC_FW_MAJOR 32
+#define ICL_GUC_FW_MINOR 0
+#define ICL_GUC_FW_PATCH 3
+#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
+MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
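
For reference, the stringification above flattens to a single path literal; e.g. with the ICL values, __MAKE_GUC_FW_PATH(ICL) yields:

/* "i915/" "icl" "_guc_" "32" "." "0" "." "3" ".bin"
 *	=> "i915/icl_guc_32.0.3.bin"
 */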
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return;
if (i915_modparams.guc_firmware_path) {
guc_fw->path = i915_modparams.guc_firmware_path;
guc_fw->major_ver_wanted = 0;
guc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- guc_fw->path = I915_SKL_GUC_UCODE;
- guc_fw->major_ver_wanted = SKL_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- guc_fw->path = I915_BXT_GUC_UCODE;
- guc_fw->major_ver_wanted = BXT_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc_fw->path = I915_KBL_GUC_UCODE;
- guc_fw->major_ver_wanted = KBL_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_FW_MINOR;
+ } else if (IS_ICELAKE(i915)) {
+ guc_fw->path = ICL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(i915)) {
+ guc_fw->path = GLK_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
+ } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ guc_fw->path = KBL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
+ } else if (IS_BROXTON(i915)) {
+ guc_fw->path = BXT_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
+ } else if (IS_SKYLAKE(i915)) {
+ guc_fw->path = SKL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
}
}
@@ -90,7 +119,7 @@ void intel_guc_fw_init_early(struct intel_guc *guc)
{
struct intel_uc_fw *guc_fw = &guc->fw;
- intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+ intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC);
guc_fw_select(guc_fw);
}
@@ -122,14 +151,16 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *fw = &guc->fw;
+ struct sg_table *pages = fw->obj->mm.pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
- sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
- rsa, sizeof(rsa), guc->fw.rsa_offset);
+ sg_pcopy_to_buffer(pages->sgl, pages->nents,
+ rsa, sizeof(rsa), fw->rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
@@ -201,7 +232,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*/
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
@@ -214,7 +245,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+ offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -233,7 +264,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -250,9 +281,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- guc_xfer_rsa(guc, vma);
+ guc_xfer_rsa(guc);
- ret = guc_xfer_ucode(guc, vma);
+ ret = guc_xfer_ucode(guc);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index b2f5148f4f17..f55f3bc8524d 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,14 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+/*
+ * XXX: Beware that Gen9 firmware 32.x uses a wrong definition for
+ * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
+ * as we are not enabling GuC submission mode where this will be used
+ */
+#define GUC_MAX_ENGINE_CLASSES 5
+#define GUC_MAX_INSTANCES_PER_CLASS 4
+
#define GUC_DOORBELL_INVALID 256
#define GUC_DB_SIZE (PAGE_SIZE)
@@ -73,44 +81,28 @@
#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
-/* The guc control data is 10 DWORDs */
+/* New GuC control data */
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
-#define GUC_CTL_ARAT_HIGH 1
-#define GUC_CTL_ARAT_LOW 2
-
-#define GUC_CTL_DEVICE_INFO 3
-
-#define GUC_CTL_LOG_PARAMS 4
+#define GUC_CTL_LOG_PARAMS 1
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT)
+#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6
#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
-#define GUC_CTL_PAGE_FAULT_CONTROL 5
+#define GUC_CTL_WA 2
+#define GUC_CTL_FEATURE 3
+#define GUC_CTL_DISABLE_SCHEDULER (1 << 14)
-#define GUC_CTL_WA 6
-#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
-
-#define GUC_CTL_FEATURE 7
-#define GUC_CTL_VCS2_ENABLED (1 << 0)
-#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
-#define GUC_CTL_FEATURE2 (1 << 2)
-#define GUC_CTL_POWER_GATING (1 << 3)
-#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
-#define GUC_CTL_PREEMPTION_LOG (1 << 5)
-#define GUC_CTL_ENABLE_SLPC (1 << 7)
-#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
-
-#define GUC_CTL_DEBUG 8
+#define GUC_CTL_DEBUG 4
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
@@ -123,13 +115,10 @@
#define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7)
-#define GUC_WQ_TRACK_ENABLED (1 << 8)
-#define GUC_ADS_ENABLED (1 << 9)
-#define GUC_LOG_DEFAULT_DISABLED (1 << 10)
-#define GUC_ADS_ADDR_SHIFT 11
-#define GUC_ADS_ADDR_MASK 0xfffff800
-#define GUC_CTL_RSRVD 9
+#define GUC_CTL_ADS 5
+#define GUC_ADS_ADDR_SHIFT 1
+#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
@@ -168,11 +157,7 @@
* in fw. So the driver will load a truncated firmware in this case.
*
* HuC firmware layout is the same as the GuC firmware layout.
- *
- * HuC firmware css header is different. However, the only difference is where
- * the version information is saved. The uc_css_header is unified to support
- * both. Driver should get HuC version from uc_css_header.huc_sw_version, while
- * uc_css_header.guc_sw_version for GuC.
+ * Only the HuC version information is stored in a different way.
*/
struct uc_css_header {
@@ -183,41 +168,27 @@ struct uc_css_header {
u32 header_version;
u32 module_id;
u32 module_vendor;
- union {
- struct {
- u8 day;
- u8 month;
- u16 year;
- };
- u32 date;
- };
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
u32 size_dw; /* uCode plus header_size_dw */
u32 key_size_dw;
u32 modulus_size_dw;
u32 exponent_size_dw;
- union {
- struct {
- u8 hour;
- u8 min;
- u16 sec;
- };
- u32 time;
- };
-
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
char username[8];
char buildnumber[12];
- union {
- struct {
- u32 branch_client_version;
- u32 sw_version;
- } guc;
- struct {
- u32 sw_version;
- u32 reserved;
- } huc;
- };
- u32 prod_preprod_fw;
- u32 reserved[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
+#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
+#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+ u32 reserved[14];
u32 header_info;
} __packed;
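
A minimal sketch of consuming the packed sw_version field with the masks above (helper names are illustrative, not part of the patch):

static inline u8 css_guc_major(const struct uc_css_header *css)
{
	return (css->sw_version & CSS_SW_VERSION_GUC_MAJOR) >> 16;
}

static inline u8 css_guc_minor(const struct uc_css_header *css)
{
	return (css->sw_version & CSS_SW_VERSION_GUC_MINOR) >> 8;
}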
@@ -384,14 +355,16 @@ struct guc_ct_buffer_desc {
*
* bit[4..0] message len (in dwords)
* bit[7..5] reserved
- * bit[8] write fence to desc
- * bit[9] write status to H2G buff
- * bit[10] send status (via G2H)
+ * bit[8] response (G2H only)
+ * bit[8] write fence to desc (H2G only)
+ * bit[9] write status to H2G buff (H2G only)
+ * bit[10] send status back via G2H (H2G only)
* bit[15..11] reserved
* bit[31..16] action code
*/
#define GUC_CT_MSG_LEN_SHIFT 0
#define GUC_CT_MSG_LEN_MASK 0x1F
+#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
#define GUC_CT_MSG_SEND_STATUS (1 << 10)
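
Given the header layout documented above, composing an H2G header is a matter of packing the action code into the top half and the dword length into bits [4:0]; a sketch with an illustrative helper name:

static inline u32 ct_header_make(u32 action, u32 len)
{
	/* bits [31:16] action code, bits [4:0] length in dwords */
	return (action << 16) | (len & GUC_CT_MSG_LEN_MASK);
}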
@@ -423,23 +396,19 @@ struct guc_ct_buffer_desc {
struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
u32 execution_quantum;
- u32 reserved1;
-
/* Time to wait for a preemption request to complete before issuing a
* reset. (in micro seconds). */
u32 preemption_time;
-
/* How much time to allow to run after the first fault is observed.
* Then preempt afterwards. (in micro seconds) */
u32 fault_time;
-
u32 policy_flags;
- u32 reserved[2];
+ u32 reserved[8];
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
-
+ struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
+ u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
* Typically 1000s of micro seconds (example only, not granularity). */
@@ -452,57 +421,73 @@ struct guc_policies {
* idle. */
u32 max_num_work_items;
- u32 reserved[19];
+ u32 reserved[4];
} __packed;
/* GuC MMIO reg state struct */
-#define GUC_REGSET_FLAGS_NONE 0x0
-#define GUC_REGSET_POWERCYCLE 0x1
-#define GUC_REGSET_MASKED 0x2
-#define GUC_REGSET_ENGINERESET 0x4
-#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
-#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
-#define GUC_REGSET_MAX_REGISTERS 25
-#define GUC_MMIO_WHITE_LIST_START 0x24d0
-#define GUC_MMIO_WHITE_LIST_MAX 12
+#define GUC_REGSET_MAX_REGISTERS 64
#define GUC_S3_SAVE_SPACE_PAGES 10
-struct guc_mmio_regset {
- struct __packed {
- u32 offset;
- u32 value;
- u32 flags;
- } registers[GUC_REGSET_MAX_REGISTERS];
+struct guc_mmio_reg {
+ u32 offset;
+ u32 value;
+ u32 flags;
+#define GUC_REGSET_MASKED (1 << 0)
+} __packed;
+struct guc_mmio_regset {
+ struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS];
u32 values_valid;
u32 number_of_registers;
} __packed;
-/* MMIO registers that are set as non privileged */
-struct mmio_white_list {
- u32 mmio_start;
- u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
- u32 count;
+/* GuC register sets */
+struct guc_mmio_reg_state {
+ struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 reserved[98];
} __packed;
-struct guc_mmio_reg_state {
- struct guc_mmio_regset global_reg;
- struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
- struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
+/* HW info */
+struct guc_gt_system_info {
+ u32 slice_enabled;
+ u32 rcs_enabled;
+ u32 reserved0;
+ u32 bcs_enabled;
+ u32 vdbox_enable_mask;
+ u32 vdbox_sfc_support_mask;
+ u32 vebox_enable_mask;
+ u32 reserved[9];
} __packed;
-/* GuC Additional Data Struct */
+/* Clients info */
+struct guc_ct_pool_entry {
+ struct guc_ct_buffer_desc desc;
+ u32 reserved[7];
+} __packed;
+
+#define GUC_CT_POOL_SIZE 2
+struct guc_clients_info {
+ u32 clients_num;
+ u32 reserved0[13];
+ u32 ct_pool_addr;
+ u32 ct_pool_count;
+ u32 reserved[4];
+} __packed;
+
+/* GuC Additional Data Struct */
struct guc_ads {
u32 reg_state_addr;
u32 reg_state_buffer;
- u32 golden_context_lrca;
u32 scheduler_policies;
- u32 reserved0[3];
- u32 eng_state_size[GUC_MAX_ENGINES_NUM];
- u32 reserved2[4];
+ u32 gt_system_info;
+ u32 clients_info;
+ u32 control_data;
+ u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
+ u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
+ u32 reserved[16];
} __packed;
/* GuC logging structures */
@@ -515,6 +500,8 @@ enum guc_log_buffer_type {
};
/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
* Below state structure is used for coordination of retrieval of GuC firmware
* logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 last read in the log buffer and
@@ -646,7 +633,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
- INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
@@ -654,6 +640,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+ INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
@@ -674,9 +661,9 @@ enum intel_guc_report_status {
};
enum intel_guc_sleep_state_status {
- INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
- INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
- INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
};
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 7146524264dd..e3b83ecb90b5 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -208,7 +208,9 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
/* buffer_full_cnt is a 4 bit counter */
log->stats[type].sampled_overflow += 16;
}
- DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+
+ dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
+ "GuC log buffer overflow\n");
}
return overflow;
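
Since buffer_full_cnt is only a 4-bit hardware counter, the accounting above has to detect wraparound: when the freshly sampled count is below the previously seen one, 16 is added to the running total, which is equivalent to the masked delta sketched here (names illustrative):

static u32 full_cnt_delta(u32 old_cnt, u32 new_cnt)
{
	/* both values are 4-bit counts in [0, 15] */
	return (new_cnt - old_cnt) & 0xf;
}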
@@ -343,32 +345,21 @@ static void capture_logs_work(struct work_struct *work)
static int guc_log_map(struct intel_guc_log *log)
{
- struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- int ret;
lockdep_assert_held(&log->relay.lock);
if (!log->vma)
return -ENODEV;
- mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
/*
* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
- }
log->relay.buf_addr = vaddr;
@@ -447,7 +438,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -526,7 +517,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -611,7 +602,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 57e7ad522c2f..a214f8b71929 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -51,6 +51,9 @@
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
+#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4)
+#define GEN11_SOFT_SCRATCH_COUNT 4
+
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_COUNT 64
@@ -76,6 +79,9 @@
#define HUC_STATUS2 _MMIO(0xD3B0)
#define HUC_FW_VERIFIED (1<<7)
+#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC)
+#define HUC_LOAD_SUCCESSFUL (1 << 0)
+
#define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_LOCKED (1<<0)
#define GUC_WOPCM_SIZE_SHIFT 12
@@ -103,6 +109,7 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
+#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
#define GUC_NUM_DOORBELLS 256
@@ -127,4 +134,22 @@ struct guc_doorbell_info {
#define GUC_WD_VECS_IER _MMIO(0xC558)
#define GUC_PM_P24C_IER _MMIO(0xC55C)
+/* GuC Interrupt Vector */
+#define GEN11_GUC_INTR_GUC2HOST (1 << 15)
+#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14)
+#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13)
+#define GEN11_GUC_INTR_SEM_SIG (1 << 12)
+#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11)
+#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10)
+#define GEN11_GUC_INTR_DMA_DONE (1 << 9)
+#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8)
+#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7)
+#define GEN11_GUC_INTR_SW_INT_6 (1 << 6)
+#define GEN11_GUC_INTR_SW_INT_5 (1 << 5)
+#define GEN11_GUC_INTR_SW_INT_4 (1 << 4)
+#define GEN11_GUC_INTR_SW_INT_3 (1 << 3)
+#define GEN11_GUC_INTR_SW_INT_2 (1 << 2)
+#define GEN11_GUC_INTR_SW_INT_1 (1 << 1)
+#define GEN11_GUC_INTR_SW_INT_0 (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 46cd0e70aecb..db531ebc7704 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -24,8 +24,12 @@
#include <linux/circ_buf.h>
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_context.h"
+#include "gem/i915_gem_context.h"
+
#include "intel_guc_submission.h"
-#include "intel_lrc_reg.h"
#include "i915_drv.h"
#define GUC_PREEMPT_FINISHED 0x1
@@ -362,11 +366,10 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
static void guc_stage_desc_init(struct intel_guc_client *client)
{
struct intel_guc *guc = client->guc;
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
+ struct i915_gem_engines_iter it;
struct guc_stage_desc *desc;
- unsigned int tmp;
+ struct intel_context *ce;
u32 gfx_addr;
desc = __get_stage_desc(client);
@@ -380,10 +383,11 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = intel_context_lookup(ctx, engine);
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct guc_execlist_context *lrc;
+
+ if (!(ce->engine->mask & client->engines))
+ continue;
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -392,7 +396,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to entering here.
*/
- if (!ce || !ce->state)
+ if (!ce->state)
break; /* XXX: continue? */
/*
@@ -402,6 +406,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* Instead, the GuC uses the LRCA of the user mode context (see
* guc_add_request below).
*/
+ lrc = &desc->lrc[ce->engine->guc_id];
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
@@ -412,15 +417,16 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* here. In proxy submission, it wants the stage id
*/
lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
+ (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc->engines_used |= (1 << guc_engine_id);
+ desc->engines_used |= BIT(ce->engine->guc_id);
}
+ i915_gem_context_unlock_engines(ctx);
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
client->engines, desc->engines_used);
@@ -551,10 +557,10 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
+ struct drm_i915_private *i915 = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
- POSTING_READ_FW(GUC_STATUS);
+ intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}
static void inject_preempt_context(struct work_struct *work)
@@ -734,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
struct rb_node *rb;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (port_isset(port)) {
if (intel_engine_has_preemption(engine)) {
@@ -742,7 +748,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
&engine->i915->guc.preempt_work[engine->id];
int prio = execlists->queue_priority_hint;
- if (__execlists_need_preempt(prio, port_prio(port))) {
+ if (i915_scheduler_need_preempt(prio,
+ port_prio(port))) {
execlists_set_active(execlists,
EXECLISTS_ACTIVE_PREEMPT);
queue_work(engine->i915->guc.preempt_wq,
@@ -815,7 +822,7 @@ static void guc_submission_tasklet(unsigned long data)
struct i915_request *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -840,7 +847,7 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_reset_prepare(struct intel_engine_cs *engine)
@@ -877,7 +884,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
struct i915_request *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
execlists_cancel_port_requests(execlists);
@@ -893,7 +900,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
out_unlock:
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_cancel_requests(struct intel_engine_cs *engine)
@@ -919,13 +926,13 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -954,7 +961,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_reset_finish(struct intel_engine_cs *engine)
@@ -1194,7 +1201,7 @@ static void __guc_client_disable(struct intel_guc_client *client)
* the case, instead of trying (in vain) to communicate with it, let's
* just cleanup the doorbell HW and our internal state.
*/
- if (intel_guc_is_alive(client->guc))
+ if (intel_guc_is_loaded(client->guc))
destroy_doorbell(client);
else
__fini_doorbell(client);
@@ -1299,7 +1306,7 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
+ ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -1346,7 +1353,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
+ ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -1359,6 +1366,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
+ intel_engine_park(engine);
intel_engine_unpin_breadcrumbs_irq(engine);
engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
@@ -1420,10 +1428,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
- err = intel_guc_sample_forcewake(guc);
- if (err)
- return err;
-
err = guc_clients_enable(guc);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index aa5e6749c925..7d823a513b9c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -27,9 +27,10 @@
#include <linux/spinlock.h>
+#include "gt/intel_engine_types.h"
+
#include "i915_gem.h"
#include "i915_selftest.h"
-#include "intel_engine_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 94c04f16a2ad..fb6f693d3cac 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -29,7 +29,19 @@
void intel_huc_init_early(struct intel_huc *huc)
{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+
intel_huc_fw_init_early(huc);
+
+ if (INTEL_GEN(i915) >= 11) {
+ huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
+ huc->status.mask = HUC_LOAD_SUCCESSFUL;
+ huc->status.value = HUC_LOAD_SUCCESSFUL;
+ } else {
+ huc->status.reg = HUC_STATUS2;
+ huc->status.mask = HUC_FW_VERIFIED;
+ huc->status.value = HUC_FW_VERIFIED;
+ }
}
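/*
 * A minimal sketch of how the (reg, mask, value) tuple chosen above is
 * meant to be consumed; the helper name is illustrative, but the test
 * itself matches the waits in intel_huc_auth() and
 * intel_huc_check_status() further down in this patch.
 */
static inline bool huc_load_status_matches(struct intel_huc *huc, u32 status)
{
	/* Loaded/authenticated once the masked status equals the target */
	return (status & huc->status.mask) == huc->status.value;
}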
int intel_huc_init_misc(struct intel_huc *huc)
@@ -40,6 +52,61 @@ int intel_huc_init_misc(struct intel_huc *huc)
return 0;
}
+static int intel_huc_rsa_data_create(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct intel_guc *guc = &i915->guc;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * HuC firmware will sit above GUC_GGTT_TOP and will not map
+ * through GTT. Unfortunately, this means GuC cannot perform
+ * the HuC authentication, as the RSA offset now falls within the
+ * GuC-inaccessible range. We resort to perma-pinning an additional
+ * vma within the accessible range that contains only the RSA
+ * signature. The GuC can use this extra pinning to perform the
+ * authentication, since its GGTT offset will be GuC-accessible.
+ */
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ huc->rsa_data = vma;
+ huc->rsa_data_vaddr = vaddr;
+
+ return 0;
+}
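/*
 * The comment above relies on the signature vma being GuC-accessible,
 * i.e. below GUC_GGTT_TOP. A debug assertion of that invariant could
 * look like the sketch below (the explicit check here is an
 * assumption; intel_guc_ggtt_offset() is expected to flag
 * out-of-range offsets on its own):
 *
 *	GEM_BUG_ON(intel_guc_ggtt_offset(guc, vma) + PAGE_SIZE >
 *		   GUC_GGTT_TOP);
 */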
+
+static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
+{
+ i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
+}
+
+int intel_huc_init(struct intel_huc *huc)
+{
+ int err;
+
+ err = intel_huc_rsa_data_create(huc);
+ if (err)
+ return err;
+
+ return intel_uc_fw_init(&huc->fw);
+}
+
+void intel_huc_fini(struct intel_huc *huc)
+{
+ intel_uc_fw_fini(&huc->fw);
+ intel_huc_rsa_data_destroy(huc);
+}
+
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
@@ -55,45 +122,31 @@ int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
- struct i915_vma *vma;
- u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return -ENOEXEC;
- vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
- goto fail;
- }
-
ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, vma) +
- huc->fw.rsa_offset);
+ intel_guc_ggtt_offset(guc, huc->rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
- goto fail_unpin;
+ goto fail;
}
/* Check authentication status, it should be done by now */
ret = __intel_wait_for_register(&i915->uncore,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 2, 50, &status);
+ huc->status.reg,
+ huc->status.mask,
+ huc->status.value,
+ 2, 50, NULL);
if (ret) {
- DRM_ERROR("HuC: Firmware not verified %#x\n", status);
- goto fail_unpin;
+ DRM_ERROR("HuC: Firmware not verified %d\n", ret);
+ goto fail;
}
- i915_vma_unpin(vma);
return 0;
-fail_unpin:
- i915_vma_unpin(vma);
fail:
huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
@@ -121,8 +174,9 @@ int intel_huc_check_status(struct intel_huc *huc)
if (!HAS_HUC(dev_priv))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
- status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ status = (I915_READ(huc->status.reg) & huc->status.mask) ==
+ huc->status.value;
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 7e41d870b509..2a6c94e79f17 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -25,6 +25,7 @@
#ifndef _INTEL_HUC_H_
#define _INTEL_HUC_H_
+#include "i915_reg.h"
#include "intel_uc_fw.h"
#include "intel_huc_fw.h"
@@ -33,16 +34,26 @@ struct intel_huc {
struct intel_uc_fw fw;
/* HuC-specific additions */
+ struct i915_vma *rsa_data;
+ void *rsa_data_vaddr;
+
+ struct {
+ i915_reg_t reg;
+ u32 mask;
+ u32 value;
+ } status;
};
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init_misc(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
static inline void intel_huc_fini_misc(struct intel_huc *huc)
{
- intel_uc_fw_fini(&huc->fw);
+ intel_uc_fw_cleanup_fetch(&huc->fw);
}
static inline int intel_huc_sanitize(struct intel_huc *huc)
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 68d47c105939..05cbf8338f53 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -34,6 +34,14 @@
#define KBL_HUC_FW_MINOR 00
#define KBL_BLD_NUM 1810
+#define GLK_HUC_FW_MAJOR 03
+#define GLK_HUC_FW_MINOR 01
+#define GLK_BLD_NUM 2893
+
+#define ICL_HUC_FW_MAJOR 8
+#define ICL_HUC_FW_MINOR 4
+#define ICL_BLD_NUM 3238
+
#define HUC_FW_PATH(platform, major, minor, bld_num) \
"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -50,6 +58,14 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
KBL_HUC_FW_MINOR, KBL_BLD_NUM)
MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
+ GLK_HUC_FW_MINOR, GLK_BLD_NUM)
+MODULE_FIRMWARE(I915_GLK_HUC_UCODE);
+
+#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \
+ ICL_HUC_FW_MINOR, ICL_BLD_NUM)
+MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
+
static void huc_fw_select(struct intel_uc_fw *huc_fw)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
@@ -76,6 +92,14 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
huc_fw->path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ huc_fw->path = I915_GLK_HUC_UCODE;
+ huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
+ } else if (IS_ICELAKE(dev_priv)) {
+ huc_fw->path = I915_ICL_HUC_UCODE;
+ huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
}
}
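/*
 * For reference, HUC_FW_PATH() above stringifies its arguments
 * directly into the firmware file name, so the two new entries
 * expand to:
 *
 *	I915_GLK_HUC_UCODE -> "i915/glk_huc_ver03_01_2893.bin"
 *	I915_ICL_HUC_UCODE -> "i915/icl_huc_ver8_4_3238.bin"
 */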
@@ -89,22 +113,28 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
- intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC);
huc_fw_select(huc_fw);
}
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- * @vma: the firmware image (bound into the GGTT)
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
+static void huc_xfer_rsa(struct intel_huc *huc)
{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct intel_uc_fw *fw = &huc->fw;
+ struct sg_table *pages = fw->obj->mm.pages;
+
+ /*
+ * The HuC firmware image is outside the GuC-accessible range.
+ * Copy the RSA signature out of the image into the perma-pinned
+ * region set aside for it.
+ */
+ sg_pcopy_to_buffer(pages->sgl, pages->nents,
+ huc->rsa_data_vaddr, fw->rsa_size,
+ fw->rsa_offset);
+}
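/*
 * sg_pcopy_to_buffer(sgl, nents, buf, buflen, skip) copies @buflen
 * bytes out of the scatterlist into @buf, starting @skip bytes into
 * the sg data, and returns the number of bytes copied. A defensive
 * caller could check that return value, e.g. (illustrative only; the
 * code above deliberately ignores it):
 *
 *	size_t copied = sg_pcopy_to_buffer(pages->sgl, pages->nents,
 *					   huc->rsa_data_vaddr,
 *					   fw->rsa_size, fw->rsa_offset);
 *	GEM_BUG_ON(copied < fw->rsa_size);
 */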
+
+static int huc_xfer_ucode(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
struct drm_i915_private *dev_priv = huc_to_i915(huc);
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
@@ -116,7 +146,7 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
- offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
+ offset = intel_uc_fw_ggtt_offset(huc_fw) +
huc_fw->header_offset;
intel_uncore_write(uncore, DMA_ADDR_0_LOW,
lower_32_bits(offset));
@@ -151,6 +181,23 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
}
/**
+ * huc_fw_xfer() - DMAs the firmware
+ * @huc_fw: the firmware descriptor
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+
+ huc_xfer_rsa(huc);
+
+ return huc_xfer_ucode(huc);
+}
+
+/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 44be676fabd6..d9a7a13ce32a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -33,11 +33,15 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "display/intel_atomic.h"
+#include "display/intel_fbc.h"
+#include "display/intel_sprite.h"
+
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
#include "intel_pm.h"
-#include "intel_sprite.h"
+#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/**
@@ -188,8 +192,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
switch (ddrpll & 0xff) {
case 0xc:
@@ -317,7 +321,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if (enable)
@@ -332,14 +336,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
@@ -348,7 +352,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
#define FW_WM(value, plane) \
@@ -675,7 +679,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
{
u64 ret;
- ret = (u64)pixel_rate * cpp * latency;
+ ret = mul_u32_u32(pixel_rate, cpp * latency);
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
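/*
 * mul_u32_u32() widens both operands to u64 before multiplying, which
 * can spare 32-bit targets a full 64x64 multiply. Overflow of a plain
 * u32 product is real here: e.g. a 600 MHz pixel clock (600000 kHz)
 * with cpp * latency == 8000 gives 4.8e9, which no longer fits in a
 * u32 (the figures are illustrative).
 */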
@@ -1946,6 +1950,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
@@ -1971,13 +1976,13 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
* intel_pipe_update_start() has already disabled interrupts
* for us, so a plain spin_lock() is sufficient here.
*/
- spin_lock(&dev_priv->uncore.lock);
+ spin_lock(&uncore->lock);
switch (crtc->pipe) {
u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
- dsparb = I915_READ_FW(DSPARB);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1989,12 +1994,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB, dsparb);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = I915_READ_FW(DSPARB);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -2006,12 +2011,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB, dsparb);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = I915_READ_FW(DSPARB3);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -2023,16 +2028,16 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB3, dsparb3);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
default:
break;
}
- POSTING_READ_FW(DSPARB);
+ intel_uncore_posting_read_fw(uncore, DSPARB);
- spin_unlock(&dev_priv->uncore.lock);
+ spin_unlock(&uncore->lock);
}
#undef VLV_FIFO
@@ -2810,6 +2815,8 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (INTEL_GEN(dev_priv) >= 9) {
u32 val;
int ret, i;
@@ -2817,11 +2824,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
- &val);
- mutex_unlock(&dev_priv->pcu_lock);
+ &val, NULL);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2838,11 +2843,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
- &val);
- mutex_unlock(&dev_priv->pcu_lock);
+ &val, NULL);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
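/*
 * sandybridge_pcode_read() grows a second out-parameter in this
 * series; passing NULL, as at both call sites above, keeps the old
 * single-dword read. The implied new signature (an inference from the
 * call sites, not quoted from the patch) would be:
 *
 *	int sandybridge_pcode_read(struct drm_i915_private *dev_priv,
 *				   u32 mbox, u32 *val, u32 *val1);
 */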
@@ -2895,7 +2898,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- u64 sskpd = I915_READ64(MCH_SSKPD);
+ u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@@ -2905,14 +2908,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
- u32 sskpd = I915_READ(MCH_SSKPD);
+ u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
- u32 mltr = I915_READ(MLTR_ILK);
+ u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@@ -3677,13 +3680,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Enabling SAGV\n");
- mutex_lock(&dev_priv->pcu_lock);
-
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
- mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -3714,15 +3714,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Disabling SAGV\n");
- mutex_lock(&dev_priv->pcu_lock);
-
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
- mutex_unlock(&dev_priv->pcu_lock);
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -3763,14 +3759,16 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
sagv_block_time_us = 10;
/*
- * SKL+ workaround: bspec recommends we disable SAGV when we have
- * more then one pipe enabled
- *
* If there are no active CRTCs, no additional checks need be performed
*/
if (hweight32(intel_state->active_crtcs) == 0)
return true;
- else if (hweight32(intel_state->active_crtcs) > 1)
+
+ /*
+ * SKL+ workaround: bspec recommends we disable SAGV when we have
+ * more than one pipe enabled
+ */
+ if (hweight32(intel_state->active_crtcs) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
@@ -4370,15 +4368,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11)
+ total_data_rate =
+ icl_get_total_relative_data_rate(cstate,
+ plane_data_rate);
+ else
total_data_rate =
skl_get_total_relative_data_rate(cstate,
plane_data_rate,
uv_plane_data_rate);
- else
- total_data_rate =
- icl_get_total_relative_data_rate(cstate,
- plane_data_rate);
+
skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
ddb, alloc, &num_active);
@@ -4787,9 +4786,11 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
return;
}
- /* Display WA #1141: kbl,cfl */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
- IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
@@ -6139,7 +6140,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -6169,7 +6170,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -6378,16 +6379,25 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL2, val);
}
+static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(dev_priv))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ return dev_priv->dram_info.symmetric_memory;
+
+ return true;
+}
+
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
if (!HAS_IPC(dev_priv))
return;
- /* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
- else
- dev_priv->ipc_enabled = true;
+ dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
intel_enable_ipc(dev_priv);
}
@@ -6397,13 +6407,14 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 rgvswctl;
lockdep_assert_held(&mchdev_lock);
- rgvswctl = I915_READ16(MEMSWCTL);
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
@@ -6411,37 +6422,38 @@ bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
spin_lock_irq(&mchdev_lock);
- rgvmodectl = I915_READ(MEMMODECTL);
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
/* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+ intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
/* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
/* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
- I915_WRITE(MEMIHYST, 1);
+ intel_uncore_write(uncore, MEMIHYST, 1);
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
@@ -6449,8 +6461,8 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
dev_priv->ips.fstart = fstart;
@@ -6462,53 +6474,66 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
/*
* Interrupts will be enabled in ironlake_irq_postinstall
*/
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
- if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
ironlake_set_drps(dev_priv, fstart);
- dev_priv->ips.last_count1 = I915_READ(DMIEC) +
- I915_READ(DDREC) + I915_READ(CSIEC);
+ dev_priv->ips.last_count1 =
+ intel_uncore_read(uncore, DMIEC) +
+ intel_uncore_read(uncore, DDREC) +
+ intel_uncore_read(uncore, CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = I915_READ(GFXEC);
+ dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
+static void ironlake_disable_drps(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- rgvswctl = I915_READ16(MEMSWCTL);
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore,
+ DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore,
+ DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
+ ironlake_set_drps(i915, i915->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
mdelay(1);
spin_unlock_irq(&mchdev_lock);
@@ -6743,7 +6768,9 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
if (val != dev_priv->gt_pm.rps.cur_freq) {
+ vlv_punit_get(dev_priv);
err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(dev_priv);
if (err)
return err;
@@ -6796,7 +6823,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (rps->enabled) {
u8 freq;
@@ -6819,7 +6846,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
rps->max_freq_softlimit)))
DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -6833,7 +6860,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
*/
gen6_disable_rps_interrupts(dev_priv);
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (rps->enabled) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
@@ -6843,7 +6870,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
}
void gen6_rps_boost(struct i915_request *rq)
@@ -6883,7 +6910,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int err;
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&rps->lock);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
@@ -7013,8 +7040,10 @@ static bool sanitize_rc6(struct drm_i915_private *i915)
struct intel_device_info *info = mkwrite_device_info(i915);
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(i915)) {
info->has_rc6 = 0;
+ info->has_rps = false;
+ }
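	/*
	 * has_rps is presumably what HAS_RPS() tests; it gates the new
	 * "if (HAS_RPS(dev_priv)) intel_enable_rps(dev_priv)" check
	 * added to intel_enable_gt_powersave() below.
	 */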
if (info->has_rc6 &&
IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
@@ -7062,7 +7091,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
if (sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status) == 0)
+ &ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
@@ -7409,7 +7438,8 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
GEN6_RC_CTL_HW_ENABLE);
rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
if (IS_GEN(dev_priv, 6) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
} else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
@@ -7454,7 +7484,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
unsigned int max_gpu_freq, min_gpu_freq;
struct cpufreq_policy *policy;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
+ lockdep_assert_held(&rps->lock);
if (rps->max_freq <= rps->min_freq)
return;
@@ -7753,6 +7783,11 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_setup_pctx(dev_priv);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
vlv_init_gpll_ref_freq(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -7790,6 +7825,11 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, rps->min_freq),
rps->min_freq);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
}
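/*
 * Taking the punit, NC and CCK sideband units once around the whole
 * init sequence replaces grabbing a lock per access. The bracketing
 * idiom, as used above and in cherryview_init_gt_powersave() below
 * (mask contents per the call sites):
 *
 *	vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_PUNIT) | ...);
 *	... any number of vlv_punit_read()/vlv_cck_read() calls ...
 *	vlv_iosf_sb_put(dev_priv, <same mask>);
 */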
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7799,11 +7839,14 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
cherryview_setup_pctx(dev_priv);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
vlv_init_gpll_ref_freq(dev_priv);
- mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
- mutex_unlock(&dev_priv->sb_lock);
switch ((val >> 2) & 0x7) {
case 3:
@@ -7836,6 +7879,11 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_gpu_freq(dev_priv, rps->min_freq),
rps->min_freq);
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
rps->min_freq) & 1,
"Odd GPU freq values\n");
@@ -7923,13 +7971,15 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_DOWN_IDLE_AVG);
/* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN |
- VLV_SOC_TDP_EN |
- CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_get(dev_priv);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -8006,14 +8056,16 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
+ vlv_punit_get(dev_priv);
+
/* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN |
- VLV_SOC_TDP_EN |
- VLV_BIAS_CPU_125_SOC_875;
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -8116,7 +8168,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
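	/*
	 * with_intel_runtime_pm() is now keyed on the intel_runtime_pm
	 * struct rather than the whole drm_i915_private. A minimal
	 * sketch of the idiom (the body is illustrative):
	 *
	 *	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
	 *		val = __i915_chipset_val(i915);
	 */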
@@ -8125,15 +8177,15 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
return val;
}
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+unsigned long i915_mch_val(struct drm_i915_private *i915)
{
unsigned long m, x, b;
u32 tsfs;
- tsfs = I915_READ(TSFS);
+ tsfs = intel_uncore_read(&i915->uncore, TSFS);
m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
+ x = intel_uncore_read8(&i915->uncore, TR1);
b = tsfs & TSFS_INTR_MASK;
@@ -8202,7 +8254,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
@@ -8254,7 +8306,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
@@ -8295,7 +8347,7 @@ unsigned long i915_read_mch_val(void)
if (!i915)
return 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
chipset_val = __i915_chipset_val(i915);
graphics_val = __i915_gfx_val(i915);
@@ -8517,8 +8569,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_get(&dev_priv->drm.pdev->dev);
}
- mutex_lock(&dev_priv->pcu_lock);
-
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
@@ -8528,24 +8578,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
gen6_init_rps_frequencies(dev_priv);
/* Derive initial user preferences/limits from the hardware limits */
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-
rps->max_freq_softlimit = rps->max_freq;
rps->min_freq_softlimit = rps->min_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- rps->min_freq_softlimit =
- max_t(int,
- rps->efficient_freq,
- intel_freq_opcode(dev_priv, 450));
-
/* After setting max-softlimit, find the overclock max freq */
if (IS_GEN(dev_priv, 6) ||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+ sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
+ &params, NULL);
if (params & BIT(31)) { /* OC supported */
DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
(rps->max_freq & 0xff) * 50,
@@ -8556,8 +8598,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
-
- mutex_unlock(&dev_priv->pcu_lock);
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -8583,7 +8625,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->pcu_lock);
+ lockdep_assert_held(&i915->gt_pm.rps.lock);
if (!i915->gt_pm.llc_pstate.enabled)
return;
@@ -8595,7 +8637,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
static void intel_disable_rc6(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (!dev_priv->gt_pm.rc6.enabled)
return;
@@ -8614,7 +8656,7 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (!dev_priv->gt_pm.rps.enabled)
return;
@@ -8635,19 +8677,19 @@ static void intel_disable_rps(struct drm_i915_private *dev_priv)
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&dev_priv->gt_pm.rps.lock);
intel_disable_rc6(dev_priv);
intel_disable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_disable_llc_pstate(dev_priv);
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->pcu_lock);
+ lockdep_assert_held(&i915->gt_pm.rps.lock);
if (i915->gt_pm.llc_pstate.enabled)
return;
@@ -8659,7 +8701,7 @@ static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
static void intel_enable_rc6(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (dev_priv->gt_pm.rc6.enabled)
return;
@@ -8684,7 +8726,7 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&rps->lock);
if (rps->enabled)
return;
@@ -8719,15 +8761,16 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv))
return;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&dev_priv->gt_pm.rps.lock);
if (HAS_RC6(dev_priv))
intel_enable_rc6(dev_priv);
- intel_enable_rps(dev_priv);
+ if (HAS_RPS(dev_priv))
+ intel_enable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_enable_llc_pstate(dev_priv);
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
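/*
 * Note the locking split completed above: dev_priv->pcu_lock is gone
 * (see intel_pm_setup() below), gt_pm.rps.lock now serializes RPS/RC6
 * state transitions, and punit sideband traffic is serialized
 * separately via vlv_punit_get()/vlv_punit_put().
 */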
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9476,16 +9519,21 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
+ intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
+ intel_uncore_write16(uncore, DEUC, 0);
+ intel_uncore_write(uncore,
+ MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
/* WaDisable_RenderCache_OperationalFlush:gen4 */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+ intel_uncore_write(uncore,
+ CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9698,221 +9746,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
}
-static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
-{
- u32 flags =
- I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
-
- switch (flags) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_UNIMPLEMENTED_CMD:
- return -ENODEV;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- case GEN6_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- default:
- MISSING_CASE(flags);
- return 0;
- }
-}
-
-static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
-{
- u32 flags =
- I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
-
- switch (flags) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN7_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- case GEN7_PCODE_ILLEGAL_DATA:
- return -EINVAL;
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- default:
- MISSING_CASE(flags);
- return 0;
- }
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
-{
- int status;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- /* GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
- mbox, __builtin_return_address(0));
- return -EAGAIN;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, *val);
- I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
- I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(&dev_priv->uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500, 0, NULL)) {
- DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
- mbox, __builtin_return_address(0));
- return -ETIMEDOUT;
- }
-
- *val = I915_READ_FW(GEN6_PCODE_DATA);
- I915_WRITE_FW(GEN6_PCODE_DATA, 0);
-
- if (INTEL_GEN(dev_priv) > 6)
- status = gen7_check_mailbox_status(dev_priv);
- else
- status = gen6_check_mailbox_status(dev_priv);
-
- if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), status);
- return status;
- }
-
- return 0;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
-{
- int status;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- /* GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
- val, mbox, __builtin_return_address(0));
- return -EAGAIN;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, val);
- I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
- I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(&dev_priv->uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- fast_timeout_us, slow_timeout_ms,
- NULL)) {
- DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
- val, mbox, __builtin_return_address(0));
- return -ETIMEDOUT;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, 0);
-
- if (INTEL_GEN(dev_priv) > 6)
- status = gen7_check_mailbox_status(dev_priv);
- else
- status = gen6_check_mailbox_status(dev_priv);
-
- if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), status);
- return status;
- }
-
- return 0;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status)
-{
- u32 val = request;
-
- *status = sandybridge_pcode_read(dev_priv, mbox, &val);
-
- return *status || ((val & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @dev_priv: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- u32 status;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
-#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
- &status)
-
- /*
- * Prime the PCODE by doing a request first. Normally it guarantees
- * that a subsequent request, at most @timeout_base_ms later, succeeds.
- * _wait_for() doesn't guarantee when its passed condition is evaluated
- * first, so send the first request explicitly.
- */
- if (COND) {
- ret = 0;
- goto out;
- }
- ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
- if (!ret)
- goto out;
-
- /*
- * The above can time out if the number of requests was low (2 in the
- * worst case) _and_ PCODE was busy for some reason even after a
- * (queued) request and @timeout_base_ms delay. As a workaround retry
- * the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 50ms to
- * account for interrupts that could reduce the number of these
- * requests, and for any quirks of the PCODE firmware that delays
- * the request completion.
- */
- DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
- WARN_ON_ONCE(timeout_base_ms > 3);
- preempt_disable();
- ret = wait_for_atomic(COND, 50);
- preempt_enable();
-
-out:
- return ret ? ret : status;
-#undef COND
-}
-
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -9978,7 +9811,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->pcu_lock);
+ mutex_init(&dev_priv->gt_pm.rps.lock);
mutex_init(&dev_priv->gt_pm.rps.power.mutex);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
@@ -10108,6 +9941,12 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
return mul_u64_u32_div(time_hw, mul, div);
}
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
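/*
 * A sketch of the intended use for the new microsecond helper, e.g.
 * from a sysfs read (the register choice is illustrative):
 *
 *	u64 rc6_us = intel_rc6_residency_us(dev_priv, GEN6_GT_GFX_RC6);
 *
 * DIV_ROUND_UP_ULL rounds up, so a non-zero residency in ns never
 * reports as 0 us.
 */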
+
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
{
u32 cagf;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 674a3f0f16a7..1b489fa399e1 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "i915_reg.h"
+
struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
@@ -68,4 +70,21 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
+
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
+int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
+
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6150e35bf7b5..502c54428570 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,11 +32,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "intel_cdclk.h"
-#include "intel_crt.h"
-#include "intel_csr.h"
-#include "intel_dp.h"
-#include "intel_drv.h"
/**
* DOC: runtime pm
@@ -80,24 +75,18 @@ static void __print_depot_stack(depot_stack_handle_t stack,
stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
-static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
-
spin_lock_init(&rpm->debug.lock);
}
static noinline depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
depot_stack_handle_t stack, *stacks;
unsigned long flags;
- atomic_inc(&rpm->wakeref_count);
- assert_rpm_wakelock_held(i915);
-
- if (!HAS_RUNTIME_PM(i915))
+ if (!rpm->available)
return -1;
stack = __save_depot_stack();
@@ -124,10 +113,9 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
return stack;
}
-static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- depot_stack_handle_t stack)
+static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
+ depot_stack_handle_t stack)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long flags, n;
bool found = false;
@@ -220,41 +208,66 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
}
static noinline void
-untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
+ struct intel_runtime_pm_debug *saved)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
- struct intel_runtime_pm_debug dbg = {};
- struct drm_printer p;
- unsigned long flags;
+ *saved = *debug;
- assert_rpm_wakelock_held(i915);
- if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
- &rpm->debug.lock,
- flags)) {
- dbg = rpm->debug;
+ debug->owners = NULL;
+ debug->count = 0;
+ debug->last_release = __save_depot_stack();
+}
- rpm->debug.owners = NULL;
- rpm->debug.count = 0;
- rpm->debug.last_release = __save_depot_stack();
+static void
+dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
+{
+ struct drm_printer p;
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
- }
- if (!dbg.count)
+ if (!debug->count)
return;
p = drm_debug_printer("i915");
- __print_intel_runtime_pm_wakeref(&p, &dbg);
+ __print_intel_runtime_pm_wakeref(&p, debug);
- kfree(dbg.owners);
+ kfree(debug->owners);
}
-void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+static noinline void
+__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
+{
+ struct intel_runtime_pm_debug dbg = {};
+ unsigned long flags;
+
+ if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+ &rpm->debug.lock,
+ flags))
+ return;
+
+ __untrack_all_wakerefs(&rpm->debug, &dbg);
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ dump_and_free_wakeref_tracking(&dbg);
+}
+
+static noinline void
+untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
+{
+ struct intel_runtime_pm_debug dbg = {};
+ unsigned long flags;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+ __untrack_all_wakerefs(&rpm->debug, &dbg);
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ dump_and_free_wakeref_tracking(&dbg);
+}
+
+void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
struct intel_runtime_pm_debug dbg = {};
do {
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long alloc = dbg.count;
depot_stack_handle_t *s;
@@ -288,4041 +301,97 @@ out:
#else
-static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
static depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- atomic_inc(&i915->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(i915);
return -1;
}
-static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
-{
- assert_rpm_wakelock_held(i915);
- atomic_dec(&i915->runtime_pm.wakeref_count);
-}
-
-#endif
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id);
-
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain)
-{
- switch (domain) {
- case POWER_DOMAIN_PIPE_A:
- return "PIPE_A";
- case POWER_DOMAIN_PIPE_B:
- return "PIPE_B";
- case POWER_DOMAIN_PIPE_C:
- return "PIPE_C";
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- return "PIPE_A_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- return "PIPE_B_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- return "PIPE_C_PANEL_FITTER";
- case POWER_DOMAIN_TRANSCODER_A:
- return "TRANSCODER_A";
- case POWER_DOMAIN_TRANSCODER_B:
- return "TRANSCODER_B";
- case POWER_DOMAIN_TRANSCODER_C:
- return "TRANSCODER_C";
- case POWER_DOMAIN_TRANSCODER_EDP:
- return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
- return "TRANSCODER_EDP_VDSC";
- case POWER_DOMAIN_TRANSCODER_DSI_A:
- return "TRANSCODER_DSI_A";
- case POWER_DOMAIN_TRANSCODER_DSI_C:
- return "TRANSCODER_DSI_C";
- case POWER_DOMAIN_PORT_DDI_A_LANES:
- return "PORT_DDI_A_LANES";
- case POWER_DOMAIN_PORT_DDI_B_LANES:
- return "PORT_DDI_B_LANES";
- case POWER_DOMAIN_PORT_DDI_C_LANES:
- return "PORT_DDI_C_LANES";
- case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
- case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
- case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_A_IO:
- return "PORT_DDI_A_IO";
- case POWER_DOMAIN_PORT_DDI_B_IO:
- return "PORT_DDI_B_IO";
- case POWER_DOMAIN_PORT_DDI_C_IO:
- return "PORT_DDI_C_IO";
- case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
- case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
- case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DSI:
- return "PORT_DSI";
- case POWER_DOMAIN_PORT_CRT:
- return "PORT_CRT";
- case POWER_DOMAIN_PORT_OTHER:
- return "PORT_OTHER";
- case POWER_DOMAIN_VGA:
- return "VGA";
- case POWER_DOMAIN_AUDIO:
- return "AUDIO";
- case POWER_DOMAIN_PLLS:
- return "PLLS";
- case POWER_DOMAIN_AUX_A:
- return "AUX_A";
- case POWER_DOMAIN_AUX_B:
- return "AUX_B";
- case POWER_DOMAIN_AUX_C:
- return "AUX_C";
- case POWER_DOMAIN_AUX_D:
- return "AUX_D";
- case POWER_DOMAIN_AUX_E:
- return "AUX_E";
- case POWER_DOMAIN_AUX_F:
- return "AUX_F";
- case POWER_DOMAIN_AUX_IO_A:
- return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
- case POWER_DOMAIN_GMBUS:
- return "GMBUS";
- case POWER_DOMAIN_INIT:
- return "INIT";
- case POWER_DOMAIN_MODESET:
- return "MODESET";
- case POWER_DOMAIN_GT_IRQ:
- return "GT_IRQ";
- default:
- MISSING_CASE(domain);
- return "?";
- }
-}
-
-static void intel_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
- power_well->desc->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
-}
-
-static void intel_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
- power_well->hw_enabled = false;
- power_well->desc->ops->disable(dev_priv, power_well);
-}
-
-static void intel_power_well_get(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
-}
-
-static void intel_power_well_put(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
-
- if (!--power_well->count)
- intel_power_well_disable(dev_priv, power_well);
-}
-
-/**
- * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This is the unlocked version of intel_display_power_is_enabled() and should
- * only be used from error capture and recovery code where deadlocks are
- * possible.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_well *power_well;
- bool is_enabled;
-
- if (dev_priv->runtime_pm.suspended)
- return false;
-
- is_enabled = true;
-
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
- if (power_well->desc->always_on)
- continue;
-
- if (!power_well->hw_enabled) {
- is_enabled = false;
- break;
- }
- }
-
- return is_enabled;
-}
-
-/**
- * intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This function can be used to check the hw power domain state. It is mostly
- * used in hardware state readout functions. Everywhere else code should rely
- * upon explicit power domain reference counting to ensure that the hardware
- * block is powered up before accessing it.
- *
- * Callers must hold the relevant modesetting locks to ensure that concurrent
- * threads can't disable the power well while the caller tries to read a few
- * registers.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- bool ret;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- ret = __intel_display_power_is_enabled(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask, bool has_vga)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
-	 * console_unlock(). So here we make sure to touch the VGA MSR
-	 * register, so that vgacon can keep working normally without
-	 * triggering interrupts and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
-
- if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask)
-{
- if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
-
-	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1));
-}
-
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
- const struct i915_power_well_regs *regs,
- int pw_idx)
+static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
+ intel_wakeref_t wref)
{
- u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 ret;
-
- ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
- ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
- if (regs->kvmr.reg)
- ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
- ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
-
- return ret;
-}
-
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool disabled;
- u32 reqs;
-
- /*
-	 * Bspec doesn't require waiting for PWs to get disabled, but we still
-	 * do this out of paranoia. The known cases where a PW will be forced on:
- * - a KVMR request on any power well via the KVMR request register
- * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
- * DEBUG request registers
- * Skip the wait in case any of the request bits are set and print a
- * diagnostic message.
- */
- wait_for((disabled = !(I915_READ(regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
- if (disabled)
- return;
-
- DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
-}
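/*
 * Sketch of how the requester value built by hsw_power_well_requesters()
 * above decodes; the helper is hypothetical and only mirrors the bit
 * assignments used by the diagnostic message (bios=1, driver=2, kvmr=4,
 * debug=8).
 */
#include <stdio.h>

static void toy_print_requesters(unsigned int reqs)
{
	printf("bios:%d driver:%d kvmr:%d debug:%d\n",
	       !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}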
-
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
- enum skl_power_gate pg)
-{
- /* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg),
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool wait_fuses = power_well->desc->hsw.has_fuses;
- enum skl_power_gate uninitialized_var(pg);
- u32 val;
-
- if (wait_fuses) {
- pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
- /*
-		 * For PW1 we have to wait both for the PW0/PG0 fuse state
-		 * before enabling the power well, and for PW1/PG1's own fuse
-		 * state after enabling it. For all other power wells with
-		 * fuses we only have to wait for that PW/PG's fuse state
-		 * after the enabling.
- */
- if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
- }
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well);
-
- /* Display WA #1178: cnl */
- if (IS_CANNONLAKE(dev_priv) &&
- pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
- pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
- val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
- val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
- }
-
- if (wait_fuses)
- gen9_wait_for_power_well_fuses(dev_priv, pg);
-
- hsw_power_well_post_enable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask,
- power_well->desc->hsw.has_vga);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- hsw_power_well_pre_disable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask);
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-
static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
- u32 val;
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
-
- hsw_wait_for_power_well_enable(dev_priv, power_well);
-
- /* Display WA #1178: icl */
- if (IS_ICELAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, port)) {
- val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
- }
+ atomic_dec(&rpm->wakeref_count);
}
static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
- u32 val;
-
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+#endif
static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
- u32 val;
-
- val = I915_READ(DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
-
- hsw_power_well_enable(dev_priv, power_well);
-}
-
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- enum i915_power_well_id id = power_well->desc->id;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
- HSW_PWR_WELL_CTL_STATE(pw_idx);
- u32 val;
-
- val = I915_READ(regs->driver);
-
- /*
- * On GEN9 big core due to a DMC bug the driver's request bits for PW1
-	 * and the MISC_IO PW will not be restored, so check instead for the
- * BIOS's own request bits, which are forced-on for these power wells
- * when exiting DC5/6.
- */
- if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
- (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= I915_READ(regs->bios);
-
- return (val & mask) == mask;
-}
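/*
 * Sketch of the enabled test above: a well only counts as enabled when
 * both the request bit (the driver asked for it) and the state bit (the
 * HW granted it) are set in the control register. Plain C, illustrative
 * parameters.
 */
#include <stdbool.h>
#include <stdint.h>

static bool toy_well_enabled(uint32_t ctl, uint32_t req_bit, uint32_t state_bit)
{
	uint32_t mask = req_bit | state_bit;

	return (ctl & mask) == mask;
}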
-
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
-
- /*
- * TODO: check for the following to verify the conditions to enter DC9
- * state are satisfied:
- * 1] Check relevant display engine registers to verify if mode set
- * disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
-
- /*
- * TODO: check for the following to verify DC9 state was indeed
- * entered before programming to disable it:
- * 1] Check relevant display engine registers to verify if mode
- * set disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
- u32 state)
+intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
- int rewrites = 0;
- int rereads = 0;
- u32 v;
-
- I915_WRITE(DC_STATE_EN, state);
-
-	/* It has been observed that disabling the DC6 state sometimes
-	 * doesn't stick and the DMC keeps returning the old value. Make sure
-	 * the value sticks across enough consecutive reads, and keep
-	 * rewriting it until we are confident the state is exactly what
-	 * we want.
-	 */
- do {
- v = I915_READ(DC_STATE_EN);
-
- if (v != state) {
- I915_WRITE(DC_STATE_EN, state);
- rewrites++;
- rereads = 0;
- } else if (rereads++ > 5) {
- break;
- }
-
- } while (rewrites < 100);
-
- if (v != state)
- DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
-
-	/* Most of the time one retry is enough; avoid spamming the log */
- if (rewrites > 1)
- DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
-}
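/*
 * Self-contained sketch of the "write, re-read, rewrite until stable"
 * pattern gen9_write_dc_state() implements above; reg_read()/reg_write()
 * are stand-ins for the MMIO accessors and are assumptions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_reg;	/* stands in for DC_STATE_EN */

static uint32_t reg_read(void) { return toy_reg; }
static void reg_write(uint32_t v) { toy_reg = v; }

static void toy_sticky_write(uint32_t state)
{
	int rewrites = 0, rereads = 0;
	uint32_t v;

	reg_write(state);
	do {
		v = reg_read();
		if (v != state) {		/* firmware undid the write */
			reg_write(state);
			rewrites++;
			rereads = 0;		/* restart the stability count */
		} else if (rereads++ > 5) {	/* stable long enough: done */
			break;
		}
	} while (rewrites < 100);

	if (v != state)
		fprintf(stderr, "write of 0x%x failed, now 0x%x\n", state, v);
}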
-
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask;
-
- mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
- mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEN9_LP(dev_priv))
- mask |= DC_STATE_EN_DC9;
- else
- mask |= DC_STATE_EN_UPTO_DC6;
-
- return mask;
-}
-
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
-
- DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
- dev_priv->csr.dc_state = val;
-}
-
-/**
- * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
- * @state: target DC power state
- * - DC_STATE_DISABLE
- * - DC_STATE_EN_UPTO_DC5
- * - DC_STATE_EN_UPTO_DC6
- * - DC_STATE_EN_DC9
- *
- * Signal to DMC firmware/HW the target DC power state passed in @state.
- * DMC/HW can turn off individual display clocks and power rails when entering
- * a deeper DC power state (higher in number) and turns them back on
- * when exiting that state to a shallower one (lower in number). The
- * HW will decide
- * when to actually enter a given state on an on-demand basis, for instance
- * depending on the active state of display pipes. The state of display
- * registers backed by affected power rails are saved/restored as needed.
- *
- * Based on the above, enabling a deeper DC power state is asynchronous
- * wrt. the HW actually entering it. Disabling a deeper power state is
- * synchronous: for instance
- * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
- * back on and register state is restored. This is guaranteed by the MMIO write
- * to DC_STATE_EN blocking until the state is restored.
- */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
-{
- u32 val;
- u32 mask;
-
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
- state &= dev_priv->csr.allowed_dc_mask;
-
- val = I915_READ(DC_STATE_EN);
- mask = gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
- val & mask, state);
-
- /* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->csr.dc_state)
- DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
-
- val &= ~mask;
- val |= state;
-
- gen9_write_dc_state(dev_priv, val);
-
- dev_priv->csr.dc_state = val & mask;
-}
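/*
 * Compact sketch of the read-modify-write gen9_set_dc_state() performs
 * above: clamp the request to the allowed mask, flag the DMC changing the
 * register behind our back, then replace only the platform's DC bits.
 * Names and values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_set_dc_bits(uint32_t reg, uint32_t mask,
				uint32_t allowed, uint32_t prev,
				uint32_t state)
{
	state &= allowed;		/* never request a disallowed state */

	if ((reg & mask) != prev)	/* DMC ignored an earlier request */
		fprintf(stderr, "DC state mismatch (0x%x -> 0x%x)\n",
			prev, reg & mask);

	return (reg & ~mask) | state;	/* swap in only the DC state field */
}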
-
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc9(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC9\n");
- /*
- * Power sequencer reset is not needed on
- * platforms with South Display Engine on PCH,
- * because PPS registers are always on.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-}
-
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc9(dev_priv);
-
- DRM_DEBUG_KMS("Disabling DC9\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
- WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
-}
-
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
-
- for_each_power_well(dev_priv, power_well)
- if (power_well->desc->id == power_well_id)
- return power_well;
-
- /*
- * It's not feasible to add error checking code to the callers since
- * this condition really shouldn't happen and it doesn't even make sense
- * to abort things like display initialization sequences. Just return
- * the first power well and hope the WARN gets reported so we can fix
- * our driver.
- */
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
- return &dev_priv->power_domains.power_wells[0];
-}
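/*
 * Sketch of the lookup-with-fallback pattern above: scan for a matching
 * id and, on a miss, warn and return the first entry instead of NULL so
 * callers never need to handle an "impossible" error. Toy types only.
 */
#include <stdio.h>

struct toy_pw {
	int id;
	const char *name;
};

static struct toy_pw *toy_lookup(struct toy_pw *wells, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (wells[i].id == id)
			return &wells[i];

	fprintf(stderr, "power well %d not defined\n", id);
	return &wells[0];	/* never NULL: keeps callers simple */
}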
-
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
-{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
-
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
-
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(dev_priv);
-
- assert_csr_loaded(dev_priv);
-}
-
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc5(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC5\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-}
-
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
-
- assert_csr_loaded(dev_priv);
-}
-
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc6(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC6\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = I915_READ(regs->bios);
-
- /* Take over the request bit if set by BIOS. */
- if (bios_req & mask) {
- u32 drv_req = I915_READ(regs->driver);
-
- if (!(drv_req & mask))
- I915_WRITE(regs->driver, drv_req | mask);
- I915_WRITE(regs->bios, bios_req & ~mask);
- }
-}
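/*
 * Minimal sketch of the "take over the BIOS request" handshake above: if
 * the BIOS holds the request bit, assert it in the driver register first
 * and only then drop it from the BIOS register, so the well never glitches
 * off in between. The pointers stand in for regs->bios / regs->driver.
 */
#include <stdint.h>

static void toy_sync_hw(uint32_t *bios_req, uint32_t *drv_req, uint32_t mask)
{
	if (*bios_req & mask) {
		if (!(*drv_req & mask))
			*drv_req |= mask;	/* driver now holds it on */
		*bios_req &= ~mask;		/* release the BIOS request */
	}
}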
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *power_well;
-
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
- GLK_DISP_PW_DPIO_CMN_C);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- power_well->desc->bxt.phy);
- }
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
- u32 tmp = I915_READ(DBUF_CTL);
-
- WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
- (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
-	     "Unexpected DBuf power state (0x%08x)\n", tmp);
-}
-
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- struct intel_cdclk_state cdclk_state = {};
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
- /* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
-
- gen9_assert_dbuf_enabled(dev_priv);
-
- if (IS_GEN9_LP(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- /*
-		 * DMC retains HW context only for port A; the other combo
-		 * PHY's HW context (for port B) is lost after DC transitions,
-		 * so we need to restore it manually.
- */
- icl_combo_phys_init(dev_priv);
-}
-
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!dev_priv->csr.dmc_payload)
- return;
-
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(dev_priv);
-}
-
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_A);
- if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_B);
-}
-
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- i830_disable_pipe(dev_priv, PIPE_B);
- i830_disable_pipe(dev_priv, PIPE_A);
-}
-
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
-}
-
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (power_well->count > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
- else
- i830_pipes_power_well_disable(dev_priv, power_well);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- int pw_idx = power_well->desc->vlv.idx;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
- PUNIT_PWRGT_PWR_GATE(pw_idx);
-
- mutex_lock(&dev_priv->pcu_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->pcu_lock);
-}
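/*
 * Sketch of the Punit request/status handshake in vlv_set_power_well()
 * above: write the target state into the control register, then poll the
 * status register until it matches or a timeout expires. The punit_*
 * helpers are toy stand-ins; a real loop would sleep between polls.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t toy_punit_ctrl, toy_punit_status;

static uint32_t punit_status(void) { return toy_punit_status; }
static void punit_request(uint32_t v)
{
	toy_punit_ctrl = v;
	toy_punit_status = v;	/* toy HW grants the request immediately */
}

static bool toy_set_well_state(uint32_t mask, uint32_t state, int timeout)
{
	if ((punit_status() & mask) == state)
		return true;		/* already in the target state */

	punit_request((toy_punit_ctrl & ~mask) | state);

	while (timeout-- > 0) {
		if ((punit_status() & mask) == state)
			return true;
	}
	return false;			/* caller logs the timeout */
}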
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->vlv.idx;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-
- mutex_lock(&dev_priv->pcu_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
-
- mutex_unlock(&dev_priv->pcu_lock);
-
- return enabled;
-}
-
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- /*
- * On driver load, a pipe may be active and driving a DSI display.
- * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
- * (and never recovering) in this case. intel_dsi_post_disable() will
- * clear it when we turn off the display.
- */
- val = I915_READ(DSPCLK_GATE_D);
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-
- WARN_ON(dev_priv->rawclk_freq == 0);
-
- I915_WRITE(RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
-}
-
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- enum pipe pipe;
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection. Supposedly DSI also
- * needs the ref clock up and running.
- *
- * CHV DPLL B/C have some issues if VGA mode is enabled.
- */
- for_each_pipe(dev_priv, pipe) {
- u32 val = I915_READ(DPLL(pipe));
-
- val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- I915_WRITE(DPLL(pipe), val);
- }
-
- vlv_init_display_clock_gating(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
-	 * part of the HW/SW state that will be explicitly initialized anyway.
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
-
- /* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_ANALOG)
- intel_crt_reset(&encoder->base);
- }
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
-
- intel_power_sequencer_reset(dev_priv);
-
-	/* Prevent us from re-enabling polling by accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
- intel_hpd_poll_init(dev_priv);
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
- u32 phy_status = 0;
- u32 phy_status_mask = 0xffffffff;
-
- /*
-	 * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
- PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
- /* CL1 is on whenever anything is on in either channel */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
- /*
- * The DPLLB check accounts for the pipe B + port A usage
- * with CL2 powered up but all the lanes in the second channel
- * powered down.
- */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
- }
-
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
- }
-
- phy_status &= phy_status_mask;
-
- /*
- * The PHY may be busy with some initial calibration and whatnot,
- * so the power state can take a while to actually change.
- */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- phy_status_mask,
- phy_status,
- 10))
- DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
- enum pipe pipe;
- u32 tmp;
-
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
- phy = DPIO_PHY0;
+ if (wakelock) {
+ atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
+ assert_rpm_wakelock_held(rpm);
} else {
- pipe = PIPE_C;
- phy = DPIO_PHY1;
+ atomic_inc(&rpm->wakeref_count);
+ assert_rpm_raw_wakeref_held(rpm);
}
-
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
-
- /* Poll for phypwrgood signal */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy),
- PHY_POWERGOOD(phy),
- 1))
-		DRM_ERROR("Display PHY %d is not powered up\n", phy);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
- tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
- DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
- tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
- } else {
- /*
- * Force the non-existing CL2 off. BXT does this
- * too, so maybe it saves some power even though
- * CL2 doesn't exist?
- */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
- tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
- } else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
-
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- /* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override, unsigned int mask)
-{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
- u32 reg, val, expected, actual;
-
- /*
-	 * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[phy])
- return;
-
- if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
- else
- reg = _CHV_CMN_DW6_CH1;
-
- mutex_lock(&dev_priv->sb_lock);
- val = vlv_dpio_read(dev_priv, pipe, reg);
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * This assumes !override is only used when the port is disabled.
- * All lanes should power down even without the override when
- * the port is disabled.
- */
- if (!override || mask == 0xf) {
- expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- /*
- * If CH1 common lane is not active anymore
- * (eg. for pipe B DPLL) the entire channel will
- * shut down, which causes the common lane registers
- * to read as 0. That means we can't actually check
- * the lane power down status bits, but as the entire
- * register reads as 0 it's a good indication that the
- * channel is indeed entirely powered down.
- */
- if (ch == DPIO_CH1 && val == 0)
- expected = 0;
- } else if (mask != 0x0) {
- expected = DPIO_ANYDL_POWERDOWN;
- } else {
- expected = 0;
- }
-
- if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
- else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
-}
-
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- bool was_override;
-
- mutex_lock(&power_domains->lock);
-
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- if (override == was_override)
- goto out;
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
-out:
- mutex_unlock(&power_domains->lock);
-
- return was_override;
-}
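/*
 * Sketch of the save/restore idiom chv_phy_powergate_ch() implements
 * above: flip one override bit and hand back the previous value so the
 * caller can restore it later. The lock is elided in this toy version.
 */
#include <stdbool.h>
#include <stdint.h>

static bool toy_set_override(uint32_t *ctrl, uint32_t bit, bool override)
{
	bool was_override = *ctrl & bit;

	if (override != was_override) {
		if (override)
			*ctrl |= bit;
		else
			*ctrl &= ~bit;
	}
	return was_override;	/* caller passes this back to restore */
}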
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-
- mutex_lock(&power_domains->lock);
-
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
-
- mutex_unlock(&power_domains->lock);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = PIPE_A;
- bool enabled;
- u32 state, ctrl;
-
- mutex_lock(&dev_priv->pcu_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
-
- mutex_unlock(&dev_priv->pcu_lock);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = PIPE_A;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- mutex_lock(&dev_priv->pcu_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->pcu_lock);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
}
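/*
 * Sketch of the wakeref_count encoding intel_runtime_pm_acquire() relies
 * on above: every reference bumps the low bits, while wakelocks are
 * additionally counted above a bias, so one atomic carries both counts.
 * The bias value here is illustrative, not the driver's constant.
 */
#include <stdatomic.h>

#define TOY_WAKELOCK_BIAS	(1 << 16)

static atomic_int toy_wakeref_count;

static void toy_acquire(int wakelock)
{
	if (wakelock)	/* counted once as a ref, once in the wakelock field */
		atomic_fetch_add(&toy_wakeref_count, 1 + TOY_WAKELOCK_BIAS);
	else
		atomic_fetch_add(&toy_wakeref_count, 1);
}

static int toy_wakelocks_held(void)
{
	return atomic_load(&toy_wakeref_count) / TOY_WAKELOCK_BIAS;
}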
static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
-
- for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
- intel_power_well_get(dev_priv, power_well);
-
- power_domains->domain_use_count[domain]++;
-}
-
-/**
- * intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain and ensures that the
- * power domain and all its parents are powered up. Therefore users should only
- * grab a reference to the innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again.
- */
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&power_domains->lock);
-
- __intel_display_power_get_domain(dev_priv, domain);
-
- mutex_unlock(&power_domains->lock);
-
- return wakeref;
-}
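/*
 * Sketch of the ordering intel_display_power_get() relies on above: take
 * the runtime-pm wakeref first (the device must be awake before any power
 * well register is touched), then bump the domain's wells under the
 * power-domains lock. All toy_* names are stubs of this sketch.
 */
#include <stdio.h>

static long toy_runtime_pm_get(void) { return 1; }	/* stub wakeref */
static void toy_domains_lock(void) { }
static void toy_domains_unlock(void) { }
static void toy_get_domain_wells(void) { printf("wells on\n"); }

static long toy_power_get(void)
{
	long wakeref = toy_runtime_pm_get();	/* 1: wake the device first */

	toy_domains_lock();
	toy_get_domain_wells();			/* 2: then enable the wells */
	toy_domains_unlock();

	return wakeref;				/* paired with a later put */
}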
-
-/**
- * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain only if the
- * domain is already enabled, in which case the domain and all its parents
- * are kept powered up. Therefore users should only grab a reference to
- * the innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again.
- */
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
- if (!wakeref)
-		return 0;
-
- mutex_lock(&power_domains->lock);
-
- if (__intel_display_power_is_enabled(dev_priv, domain)) {
- __intel_display_power_get_domain(dev_priv, domain);
- is_enabled = true;
- } else {
- is_enabled = false;
- }
-
- mutex_unlock(&power_domains->lock);
-
- if (!is_enabled) {
- intel_runtime_pm_put(dev_priv, wakeref);
- wakeref = 0;
- }
-
- return wakeref;
-}
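/*
 * Sketch of the conditional-get pattern in
 * intel_display_power_get_if_enabled() above: only keep the reference if
 * the domain is already enabled, and back out completely otherwise, so a
 * zero return doubles as "nothing to put". All toy_* names are stubs.
 */
#include <stdbool.h>

static long toy_rpm_get_if_in_use(void) { return 1; }
static void toy_rpm_put(long wakeref) { (void)wakeref; }
static void toy_pd_lock(void) { }
static void toy_pd_unlock(void) { }
static bool toy_domain_enabled(void) { return true; }
static void toy_domain_get(void) { }

static long toy_power_get_if_enabled(void)
{
	long wakeref = toy_rpm_get_if_in_use();	/* may itself fail -> 0 */

	if (!wakeref)
		return 0;

	toy_pd_lock();
	if (toy_domain_enabled()) {
		toy_domain_get();
		toy_pd_unlock();
		return wakeref;			/* success: caller must put */
	}
	toy_pd_unlock();

	toy_rpm_put(wakeref);			/* undo the wakeref on failure */
	return 0;
}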
-
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- intel_display_power_domain_str(domain));
- power_domains->domain_use_count[domain]--;
-
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
- intel_power_well_put(dev_priv, power_well);
-
- mutex_unlock(&power_domains->lock);
-}
-
-/**
- * intel_display_power_put_unchecked - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(dev_priv);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put(dev_priv, wakeref);
-}
-#endif
-
-#define I830_PIPES_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * ICL PW_0/PG_0 domains (HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - central power except FBC
- * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
- * ICL PW_1/PG_1 domains (HW/DMC control):
- * - DBUF function
- * - PIPE_A and its planes, except VGA
- * - transcoder EDP + PSR
- * - transcoder DSI
- * - DDI_A
- * - FBC
- */
-#define ICL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /* VDSC/joining */
-#define ICL_PW_3_POWER_DOMAINS ( \
- ICL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - transcoder WD
- * - KVMR (HW control)
- */
-#define ICL_PW_2_POWER_DOMAINS ( \
- ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - KVMR (HW control)
- */
-#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- ICL_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define ICL_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define ICL_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define ICL_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define ICL_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define ICL_DDI_IO_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define ICL_DDI_IO_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-
-#define ICL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define ICL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops i830_pipes_power_well_ops = {
- .sync_hw = i830_pipes_power_well_sync_hw,
- .enable = i830_pipes_power_well_enable,
- .disable = i830_pipes_power_well_disable,
- .is_enabled = i830_pipes_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i830_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "pipes",
- .domains = I830_PIPES_POWER_DOMAINS,
- .ops = &i830_pipes_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = gen9_dc_off_power_well_enable,
- .disable = gen9_dc_off_power_well_disable,
- .is_enabled = gen9_dc_off_power_well_enabled,
-};
-
-static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = bxt_dpio_cmn_power_well_enable,
- .disable = bxt_dpio_cmn_power_well_disable,
- .is_enabled = bxt_dpio_cmn_power_well_enabled,
-};
-
-static const struct i915_power_well_regs hsw_power_well_regs = {
- .bios = HSW_PWR_WELL_CTL1,
- .driver = HSW_PWR_WELL_CTL2,
- .kvmr = HSW_PWR_WELL_CTL3,
- .debug = HSW_PWR_WELL_CTL4,
-};
-
-static const struct i915_power_well_desc hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_desc bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .ops = &vlv_display_power_well_ops,
- .id = VLV_DISP_PW_DISP2D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
- },
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
- },
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
- },
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
- },
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
- },
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &vlv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
-};
-
-static const struct i915_power_well_desc chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- /*
- * Pipe A power well is the new disp2d well. Pipe B and C
- * power wells don't actually exist. Pipe A power well is
- * required for any pipe to work.
- */
- .domains = CHV_DISPLAY_POWER_DOMAINS,
- .ops = &chv_pipe_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "dpio-common-bc",
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
- {
- .name = "dpio-common-d",
- .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = CHV_DISP_PW_DPIO_CMN_D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
- },
- },
-};
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
- bool ret;
-
- power_well = lookup_power_well(dev_priv, power_well_id);
- ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
-
- return ret;
-}
-
-static const struct i915_power_well_desc skl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "MISC IO power well",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_MISC_IO,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
- },
- },
- {
- .name = "DC off",
- .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A/E IO power well",
- .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
-};
-
-static const struct i915_power_well_desc bxt_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-bc",
- .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
-};
-
-static const struct i915_power_well_desc glk_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-b",
- .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
- {
- .name = "dpio-common-c",
- .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = GLK_DISP_PW_DPIO_CMN_C,
- {
- .bxt.phy = DPIO_PHY2,
- },
- },
- {
- .name = "AUX A",
- .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
-};
-
-static const struct i915_power_well_desc cnl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "AUX A",
- .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX D",
- .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
- },
- },
- {
- .name = "DC off",
- .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI F IO power well",
- .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX F",
- .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
- },
- },
-};
-
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_combo_phy_aux_power_well_enable,
- .disable = icl_combo_phy_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_tc_phy_aux_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_regs icl_aux_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_AUX1,
- .driver = ICL_PWR_WELL_CTL_AUX2,
- .debug = ICL_PWR_WELL_CTL_AUX4,
-};
-
-static const struct i915_power_well_regs icl_ddi_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_DDI1,
- .driver = ICL_PWR_WELL_CTL_DDI2,
- .debug = ICL_PWR_WELL_CTL_DDI4,
-};
-
-static const struct i915_power_well_desc icl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI E IO",
- .domains = ICL_DDI_IO_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
- },
- },
- {
- .name = "DDI F IO",
- .domains = ICL_DDI_IO_F_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
-};
-
-static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
- int disable_power_well)
-{
- if (disable_power_well >= 0)
- return !!disable_power_well;
-
- return 1;
-}
-
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
-{
- u32 mask;
- int requested_dc;
- int max_dc;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
- /*
- * DC9 has a separate HW flow from the rest of the DC states,
- * not depending on the DMC firmware. It's needed by system
- * suspend/resume, so allow it unconditionally.
- */
- mask = DC_STATE_EN_DC9;
- } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
- max_dc = 2;
- mask = 0;
- } else if (IS_GEN9_LP(dev_priv)) {
- max_dc = 1;
- mask = DC_STATE_EN_DC9;
- } else {
- max_dc = 0;
- mask = 0;
- }
-
- if (!i915_modparams.disable_power_well)
- max_dc = 0;
-
- if (enable_dc >= 0 && enable_dc <= max_dc) {
- requested_dc = enable_dc;
- } else if (enable_dc == -1) {
- requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
- DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
- enable_dc, max_dc);
- requested_dc = max_dc;
- } else {
- DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
- requested_dc = max_dc;
- }
-
- if (requested_dc > 1)
- mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
- mask |= DC_STATE_EN_UPTO_DC5;
-
- DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
-
- return mask;
-}
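
For reference, the resolution order above is easy to misread: an explicitly requested enable_dc wins only when it is in range, every other value (including the -1 default and out-of-range requests) falls back to the platform maximum, and the DC5/DC6 bits are layered on top of any unconditional DC9 bit. A standalone sketch of that reduction, where the SKETCH_* placeholders are hypothetical stand-ins for the real DC_STATE_* register bits:

#include <stdio.h>

/* Placeholder bits; the real DC_STATE_* values live in i915_reg.h. */
#define SKETCH_DC9      (1u << 3)
#define SKETCH_UPTO_DC5 (1u << 0)
#define SKETCH_UPTO_DC6 (1u << 1)

static unsigned int sketch_allowed_dc_mask(int enable_dc, int max_dc,
					   unsigned int base_mask)
{
	unsigned int mask = base_mask;
	int requested_dc;

	/* An explicit, in-range request wins; everything else falls
	 * back to the platform maximum, as in get_allowed_dc_mask(). */
	if (enable_dc >= 0 && enable_dc <= max_dc)
		requested_dc = enable_dc;
	else
		requested_dc = max_dc;

	if (requested_dc > 1)
		mask |= SKETCH_UPTO_DC6;
	if (requested_dc > 0)
		mask |= SKETCH_UPTO_DC5;

	return mask;
}

int main(void)
{
	/* gen11-style inputs: max_dc = 2, DC9 allowed unconditionally. */
	printf("mask=%#x\n", sketch_allowed_dc_mask(-1, 2, SKETCH_DC9));
	return 0;
}

Compiled in isolation, the gen11-style call prints mask=0xb, i.e. the DC9 bit plus DC5/DC6.
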
-
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
- const struct i915_power_well_desc *power_well_descs,
- int power_well_count)
-{
- u64 power_well_ids = 0;
- int i;
-
- power_domains->power_well_count = power_well_count;
- power_domains->power_wells =
- kcalloc(power_well_count,
- sizeof(*power_domains->power_wells),
- GFP_KERNEL);
- if (!power_domains->power_wells)
- return -ENOMEM;
-
- for (i = 0; i < power_well_count; i++) {
- enum i915_power_well_id id = power_well_descs[i].id;
-
- power_domains->power_wells[i].desc = &power_well_descs[i];
-
- if (id == DISP_PW_ID_NONE)
- continue;
-
- WARN_ON(id >= sizeof(power_well_ids) * 8);
- WARN_ON(power_well_ids & BIT_ULL(id));
- power_well_ids |= BIT_ULL(id);
- }
-
- return 0;
-}
-
-#define set_power_wells(power_domains, __power_well_descs) \
- __set_power_wells(power_domains, __power_well_descs, \
- ARRAY_SIZE(__power_well_descs))
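
__set_power_wells() also doubles as an init-time sanity check: every well with a real id must be unique, enforced by folding the ids into a u64 and warning on a repeat. The same invariant in isolation, as a sketch (names are illustrative; ids are assumed to fit in 64 bits, matching the WARN_ON above):

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if every id is distinct and below 64, 0 otherwise.
 * Mirrors the BIT_ULL() accumulation in __set_power_wells(). */
static int sketch_ids_unique(const unsigned int *ids, int count)
{
	uint64_t seen = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (ids[i] >= 64 || (seen & (1ull << ids[i])))
			return 0;	/* out of range or duplicate */
		seen |= 1ull << ids[i];
	}
	return 1;
}

int main(void)
{
	unsigned int ok[] = { 0, 5, 63 };
	unsigned int dup[] = { 1, 2, 1 };

	printf("%d %d\n", sketch_ids_unique(ok, 3),
	       sketch_ids_unique(dup, 3));
	return 0;
}
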
-
-/**
- * intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
- *
- * Initializes the power domain structures for @dev_priv depending upon the
- * supported platform.
- */
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int err;
-
- i915_modparams.disable_power_well =
- sanitize_disable_power_well_option(dev_priv,
- i915_modparams.disable_power_well);
- dev_priv->csr.allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
-
- BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
- mutex_init(&power_domains->lock);
-
- /*
- * The enabling order will be from lower to higher indexed wells,
- * the disabling order is reversed.
- */
- if (IS_GEN(dev_priv, 11)) {
- err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_CANNONLAKE(dev_priv)) {
- err = set_power_wells(power_domains, cnl_power_wells);
-
- /*
- * DDI and AUX IO power wells get enabled for all ports
- * regardless of their presence or use. So, in order to
- * avoid timeouts, let's remove them from the list for
- * the SKUs without port F.
- */

- if (!IS_CNL_WITH_PORT_F(dev_priv))
- power_domains->power_well_count -= 2;
- } else if (IS_GEMINILAKE(dev_priv)) {
- err = set_power_wells(power_domains, glk_power_wells);
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
- err = set_power_wells(power_domains, skl_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, chv_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, vlv_power_wells);
- } else if (IS_I830(dev_priv)) {
- err = set_power_wells(power_domains, i830_power_wells);
- } else {
- err = set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return err;
-}
-
-/**
- * intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
- *
- * Release any resources acquired by intel_power_domains_init()
- */
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
-{
- kfree(dev_priv->power_domains.power_wells);
-}
-
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
-
- mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well) {
- power_well->desc->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(dev_priv, power_well);
- }
- mutex_unlock(&power_domains->lock);
-}
-
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
-{
- u32 val, status;
-
- val = I915_READ(reg);
- val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- udelay(10);
-
- status = I915_READ(reg) & DBUF_POWER_STATE;
- if ((enable && !status) || (!enable && status)) {
- DRM_ERROR("DBus power %s timeout!\n",
- enable ? "enable" : "disable");
- return false;
- }
- return true;
-}
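
intel_dbuf_slice_set() follows a request/acknowledge idiom: set the request bit, flush it with a posting read, wait a fixed 10 us (no polling), then verify the separate status bit. A self-contained model of the idiom, with a single fake register standing in for DBUF_CTL and hypothetical mmio helpers replacing I915_READ/I915_WRITE:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_BIT   (1u << 31)	/* models DBUF_POWER_REQUEST */
#define STATE_BIT (1u << 30)	/* models DBUF_POWER_STATE */

static uint32_t fake_reg;	/* stand-in for the mmio register */

static uint32_t mmio_read(void)
{
	/* Pretend hardware: status instantly follows the request bit. */
	if (fake_reg & REQ_BIT)
		fake_reg |= STATE_BIT;
	else
		fake_reg &= ~STATE_BIT;
	return fake_reg;
}

static void mmio_write(uint32_t v)
{
	fake_reg = v;
}

static bool sketch_power_request(bool enable)
{
	uint32_t val = mmio_read();

	val = enable ? (val | REQ_BIT) : (val & ~REQ_BIT);
	mmio_write(val);
	(void)mmio_read();	/* posting read; a fixed delay follows */

	/* Check the independent status bit, as the real code does. */
	return !!(mmio_read() & STATE_BIT) == enable;
}

int main(void)
{
	printf("enable ok: %d\n", sketch_power_request(true));
	printf("disable ok: %d\n", sketch_power_request(false));
	return 0;
}
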
-
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
-{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
-}
-
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 11)
- return 1;
- return 2;
-}
-
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices)
-{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- bool ret;
-
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
- DRM_ERROR("Invalid number of dbuf slices requested\n");
- return;
- }
-
- if (req_slices == hw_enabled_slices || req_slices == 0)
- return;
-
- if (req_slices > hw_enabled_slices)
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
- else
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
-
- if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
-}
-
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
- else
- /*
- * FIXME: for now pretend that we only have 1 slice, see
- * intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power disable timeout!\n");
- else
- /*
- * FIXME: for now pretend that the first slice is always
- * enabled, see intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
- MBUS_ABOX_BT_CREDIT_POOL2(16) |
- MBUS_ABOX_B_CREDIT(1) |
- MBUS_ABOX_BW_CREDIT(1);
-
- I915_WRITE(MBUS_ABOX_CTL, val);
-}
-
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
- bool enable)
-{
- i915_reg_t reg;
- u32 reset_bits, val;
-
- if (IS_IVYBRIDGE(dev_priv)) {
- reg = GEN7_MSG_CTL;
- reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
- } else {
- reg = HSW_NDE_RSTWRN_OPT;
- reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
- }
-
- val = I915_READ(reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- I915_WRITE(reg, val);
-}
-
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* enable PCH reset handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* enable PG1 and Misc I/O */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- intel_cdclk_init(dev_priv);
-
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- gen9_dbuf_disable(dev_priv);
-
- intel_cdclk_uninit(dev_priv);
-
- /* The spec doesn't call for removing the reset handshake flag */
- /* disable PG1 and Misc I/O */
-
- mutex_lock(&power_domains->lock);
-
- /*
- * BSpec says to keep the MISC IO power well enabled here, only
- * remove our request for power well 1.
- * Note that even though the driver's request is removed power well 1
- * may stay enabled after this due to DMC's own request on it.
- */
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-}
-
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /*
- * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
- * or else the reset will hang because there is no PCH to respond.
- * Move the handshake programming to the initialization sequence;
- * previously it was left up to the BIOS.
- */
- intel_pch_reset_handshake(dev_priv, false);
-
- /* Enable PG1 */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- intel_cdclk_init(dev_priv);
-
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- gen9_dbuf_disable(dev_priv);
-
- intel_cdclk_uninit(dev_priv);
-
- /* The spec doesn't call for removing the reset handshake flag */
-
- /*
- * Disable PW1 (PG1).
- * Note that even though the driver's request is removed power well 1
- * may stay enabled after this due to DMC's own request on it.
- */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-}
-
-static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Enable PCH Reset Handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* 2-3. */
- cnl_combo_phys_init(dev_priv);
-
- /*
- * 4. Enable Power Well 1 (PG1).
- * The AUX IO power wells will be enabled on demand.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 5. Enable CD clock */
- intel_cdclk_init(dev_priv);
-
- /* 6. Enable DBUF */
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
+intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Disable all display engine functions -> already done */
-
- /* 2. Disable DBUF */
- gen9_dbuf_disable(dev_priv);
-
- /* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
-
- /*
- * 4. Disable Power Well 1 (PG1).
- * The AUX IO power wells are toggled on demand, so they are already
- * disabled at this point.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-
- /* 5. */
- cnl_combo_phys_uninit(dev_priv);
-}
-
-void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* 2-3. */
- icl_combo_phys_init(dev_priv);
-
- /*
- * 4. Enable Power Well 1 (PG1).
- * The AUX IO power wells will be enabled on demand.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 5. Enable CDCLK. */
- intel_cdclk_init(dev_priv);
-
- /* 6. Enable DBUF. */
- icl_dbuf_enable(dev_priv);
-
- /* 7. Setup MBUS. */
- icl_mbus_init(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Disable all display engine functions -> already done */
-
- /* 2. Disable DBUF */
- icl_dbuf_disable(dev_priv);
-
- /* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
-
- /*
- * 4. Disable Power Well 1 (PG1).
- * The AUX IO power wells are toggled on demand, so they are already
- * disabled at this point.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 5. */
- icl_combo_phys_uninit(dev_priv);
-}
-
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-
- /*
- * DISPLAY_PHY_CONTROL can get corrupted if read. As a
- * workaround never ever read DISPLAY_PHY_CONTROL, and
- * instead maintain a shadow copy ourselves. Use the actual
- * power well state and lane status to reconstruct the
- * expected initial value.
- */
- dev_priv->chv_phy_control =
- PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
- PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
-
- /*
- * If all lanes are disabled we leave the override disabled
- * with all power down bits cleared to match the state we
- * would use after disabling the port. Otherwise enable the
- * override and set the lane powerdown bits according to the
- * current lane status.
- */
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- u32 status = I915_READ(DPLL(PIPE_A));
- unsigned int mask;
-
- mask = status & DPLL_PORTB_READY_MASK;
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
-
- mask = (status & DPLL_PORTC_READY_MASK) >> 4;
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
-
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
- } else {
- dev_priv->chv_phy_assert[DPIO_PHY0] = true;
- }
-
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- u32 status = I915_READ(DPIO_PHY_STATUS);
- unsigned int mask;
-
- mask = status & DPLL_PORTD_READY_MASK;
-
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
-
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ if (wakelock) {
+ assert_rpm_wakelock_held(rpm);
+ atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
} else {
- dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ assert_rpm_raw_wakeref_held(rpm);
}
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
-}
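
The channel branches in chv_phy_control_init() repeat one derivation three times (PHY0/CH0, PHY0/CH1, PHY1/CH0): a fully ready channel (mask == 0xf) keeps the override off with no power-down bits set, anything else turns the override on and mirrors the current lane status. Sketched as a hypothetical helper:

/* One channel of the override derivation, sketched: all four lanes
 * ready means no override and no power-down bits; anything else
 * enables the override and mirrors the lane-ready status. */
static unsigned int sketch_powerdown_bits(unsigned int ready_mask,
					  int *enable_override)
{
	if (ready_mask == 0xf) {
		*enable_override = 0;
		return 0x0;
	}

	*enable_override = 1;
	return ready_mask;
}
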
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
-
- /* If the display might already be active, skip this */
- if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
- disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
- /* cmnlane needs DPLL registers */
- disp2d->desc->ops->enable(dev_priv, disp2d);
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- cmn->desc->ops->disable(dev_priv, cmn);
-}
-
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
-{
- bool ret;
-
- mutex_lock(&dev_priv->pcu_lock);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- mutex_unlock(&dev_priv->pcu_lock);
-
- return ret;
-}
-
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
-{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
-}
-
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
-{
- static const struct pci_device_id isp_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
- {}
- };
-
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
-}
-
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
-
-/**
- * intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
- * @resume: Called from resume code paths or not
- *
- * This function initializes the hardware power domain state and enables all
- * power wells belonging to the INIT power domain. Power wells in other
- * domains (and not in the INIT domain) are referenced or disabled by
- * intel_modeset_readout_hw_state(). After that the reference count of each
- * power well must match its HW enabled state, see
- * intel_power_domains_verify_state().
- *
- * It will return with power domains disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
- */
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- power_domains->initializing = true;
-
- if (INTEL_GEN(i915) >= 11) {
- icl_display_core_init(i915, resume);
- } else if (IS_CANNONLAKE(i915)) {
- cnl_display_core_init(i915, resume);
- } else if (IS_GEN9_BC(i915)) {
- skl_display_core_init(i915, resume);
- } else if (IS_GEN9_LP(i915)) {
- bxt_display_core_init(i915, resume);
- } else if (IS_CHERRYVIEW(i915)) {
- mutex_lock(&power_domains->lock);
- chv_phy_control_init(i915);
- mutex_unlock(&power_domains->lock);
- assert_isp_power_gated(i915);
- } else if (IS_VALLEYVIEW(i915)) {
- mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(i915);
- mutex_unlock(&power_domains->lock);
- assert_ved_power_gated(i915);
- assert_isp_power_gated(i915);
- } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- }
-
- /*
- * Keep all power wells enabled for any dependent HW access during
- * initialization and to make sure we keep BIOS enabled display HW
- * resources powered until display HW readout is complete. We drop
- * this reference in intel_power_domains_enable().
- */
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
-
- /* Disable power support if the user asked so. */
- if (!i915_modparams.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_power_domains_sync_hw(i915);
-
- power_domains->initializing = false;
+ __intel_wakeref_dec_and_check_tracking(rpm);
}
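
The acquire/release pair above shares a single atomic counter between raw and wakelock references by biasing each wakelock reference with a large constant, so both totals can be asserted on independently. A toy model of the scheme (non-atomic, and 1 << 16 is an illustrative bias; the real INTEL_RPM_WAKELOCK_BIAS differs):

#include <stdio.h>

#define SKETCH_WAKELOCK_BIAS (1 << 16)	/* illustrative value only */

static int wakeref_count;	/* atomic_t in the real driver */

static void sketch_acquire(int wakelock)
{
	/* Every reference bumps the low count; wakelock references
	 * also add the bias, so both coexist in one counter. */
	wakeref_count += wakelock ? SKETCH_WAKELOCK_BIAS + 1 : 1;
}

static void sketch_release(int wakelock)
{
	wakeref_count -= wakelock ? SKETCH_WAKELOCK_BIAS + 1 : 1;
}

int main(void)
{
	sketch_acquire(1);	/* one wakelock reference */
	sketch_acquire(0);	/* one raw reference */

	printf("total=%d wakelock=%d\n",
	       wakeref_count % SKETCH_WAKELOCK_BIAS,	/* all refs */
	       wakeref_count / SKETCH_WAKELOCK_BIAS);	/* wakelock refs */

	sketch_release(0);
	sketch_release(1);
	return 0;
}
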
-/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
- * @i915: i915 device instance
- *
- * De-initializes the display power domain HW state. It also ensures that the
- * device stays powered up so that the driver can be reloaded.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and must be paired with
- * intel_power_domains_init_hw().
- */
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
+ bool wakelock)
{
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
-
- /* Remove the refcount we took to keep power well support disabled. */
- if (!i915_modparams.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
-
- intel_power_domains_verify_state(i915);
-
- /* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(i915, wakeref);
-}
-
-/**
- * intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
- *
- * Enable the on-demand enabling/disabling of the display power wells. Note that
- * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
- * only at specific points of the display modeset sequence, thus they are not
- * affected by the intel_power_domains_enable()/disable() calls. The purpose
- * of these functions is to keep the rest of the power wells enabled until the
- * end of display HW readout (which will acquire the power references reflecting
- * the current HW state).
- */
-void intel_power_domains_enable(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
-
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_verify_state(i915);
-}
+ int ret;
-/**
- * intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
- *
- * Disable the on-demand enabling/disabling of the display power wells. See
- * intel_power_domains_enable() for which power wells this call controls.
- */
-void intel_power_domains_disable(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ ret = pm_runtime_get_sync(rpm->kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- WARN_ON(power_domains->wakeref);
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_runtime_pm_acquire(rpm, wakelock);
- intel_power_domains_verify_state(i915);
+ return track_intel_runtime_pm_wakeref(rpm);
}
/**
- * intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
- * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
+ * intel_runtime_pm_get_raw - grab a raw runtime pm reference
+ * @rpm: the intel_runtime_pm structure
*
- * This function prepares the hardware power domain state before entering
- * system suspend.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and paired with intel_power_domains_resume().
- */
-void intel_power_domains_suspend(struct drm_i915_private *i915,
- enum i915_drm_suspend_mode suspend_mode)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
-
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
-
- /*
- * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
- * support don't manually deinit the power domains. This also means the
- * CSR/DMC firmware will stay active, it will power down any HW
- * resources as required and also enable deeper system power states
- * that would be blocked if the firmware was inactive.
- */
- if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
- suspend_mode == I915_DRM_SUSPEND_IDLE &&
- i915->csr.dmc_payload) {
- intel_power_domains_verify_state(i915);
- return;
- }
-
- /*
- * Even if power well support was disabled we still want to disable
- * power wells if power domains must be deinitialized for suspend.
- */
- if (!i915_modparams.disable_power_well) {
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(i915);
- }
-
- if (INTEL_GEN(i915) >= 11)
- icl_display_core_uninit(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_display_core_uninit(i915);
- else if (IS_GEN9_BC(i915))
- skl_display_core_uninit(i915);
- else if (IS_GEN9_LP(i915))
- bxt_display_core_uninit(i915);
-
- power_domains->display_core_suspended = true;
-}
-
-/**
- * intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
+ * This function grabs a device-level runtime pm reference (mostly used for
+ * asynchronous PM management from display code) and ensures that it is powered
+ * up. Raw references are not considered during wakelock assert checks.
*
- * This function resumes the hardware power domain state during system resume.
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put_raw() to release the reference again.
*
- * It will return with power domain support disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_suspend().
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
*/
-void intel_power_domains_resume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(i915, true);
- power_domains->display_core_suspended = false;
- } else {
- WARN_ON(power_domains->wakeref);
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
- }
-
- intel_power_domains_verify_state(i915);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- struct i915_power_well *power_well;
-
- for_each_power_well(i915, power_well) {
- enum intel_display_power_domain domain;
-
- DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->desc->name, power_well->count);
-
- for_each_power_domain(domain, power_well->desc->domains)
- DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
- }
+ return __intel_runtime_pm_get(rpm, false);
}
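
A caller-side sketch of the cookie discipline these getters establish; only the intel_runtime_pm_* calls are real, the surrounding function is illustrative:

/* Caller-side pattern, sketched: the returned cookie must be handed
 * back to the matching put. do_hw_access() is illustrative only. */
static void do_hw_access(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(rpm);	/* device now powered up */

	/* ... touch hardware ... */

	intel_runtime_pm_put(rpm, wakeref);	/* may trigger autosuspend */
}
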
/**
- * intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
- *
- * Verify if the reference count of each power well matches its HW enabled
- * state and the total refcount of the domains it belongs to. This must be
- * called after modeset HW state sanitization, which is responsible for
- * acquiring reference counts for any power wells in use and disabling the
- * ones left on by BIOS but not required by any active output.
- */
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- struct i915_power_well *power_well;
- bool dump_domain_info;
-
- mutex_lock(&power_domains->lock);
-
- dump_domain_info = false;
- for_each_power_well(i915, power_well) {
- enum intel_display_power_domain domain;
- int domains_count;
- bool enabled;
-
- enabled = power_well->desc->ops->is_enabled(i915, power_well);
- if ((power_well->count || power_well->desc->always_on) !=
- enabled)
- DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
-
- domains_count = 0;
- for_each_power_domain(domain, power_well->desc->domains)
- domains_count += power_domains->domain_use_count[domain];
-
- if (power_well->count != domains_count) {
- DRM_ERROR("power well %s refcount/domain refcount mismatch "
- "(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
- domains_count);
- dump_domain_info = true;
- }
- }
-
- if (dump_domain_info) {
- static bool dumped;
-
- if (!dumped) {
- intel_power_domains_dump_info(i915);
- dumped = true;
- }
- }
-
- mutex_unlock(&power_domains->lock);
-}
-
-#else
-
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
-{
-}
-
-#endif
-
-/**
* intel_runtime_pm_get - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
@@ -4332,21 +401,14 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
- int ret;
-
- ret = pm_runtime_get_sync(kdev);
- WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
-
- return track_intel_runtime_pm_wakeref(i915);
+ return __intel_runtime_pm_get(rpm, true);
}
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
@@ -4358,28 +420,27 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
if (IS_ENABLED(CONFIG_PM)) {
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
-
/*
* In cases where runtime PM is disabled by the RPM core and we get
* an -EINVAL return value, we are not supposed to call this
* function, since the power state is undefined. This currently
* applies to the late/early system suspend/resume handlers.
*/
- if (pm_runtime_get_if_in_use(kdev) <= 0)
+ if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
return 0;
}
- return track_intel_runtime_pm_wakeref(i915);
+ intel_runtime_pm_acquire(rpm, true);
+
+ return track_intel_runtime_pm_wakeref(rpm);
}
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
@@ -4396,47 +457,81 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ assert_rpm_wakelock_held(rpm);
+ pm_runtime_get_noresume(rpm->kdev);
- assert_rpm_wakelock_held(i915);
- pm_runtime_get_noresume(kdev);
+ intel_runtime_pm_acquire(rpm, true);
- return track_intel_runtime_pm_wakeref(i915);
+ return track_intel_runtime_pm_wakeref(rpm);
+}
+
+static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
+ intel_wakeref_t wref,
+ bool wakelock)
+{
+ struct device *kdev = rpm->kdev;
+
+ untrack_intel_runtime_pm_wakeref(rpm, wref);
+
+ intel_runtime_pm_release(rpm, wakelock);
+
+ pm_runtime_mark_last_busy(kdev);
+ pm_runtime_put_autosuspend(kdev);
}
/**
- * intel_runtime_pm_put - release a runtime pm reference
- * @i915: i915 device instance
+ * intel_runtime_pm_put_raw - release a raw runtime pm reference
+ * @rpm: the intel_runtime_pm structure
+ * @wref: wakeref acquired for the reference that is being released
*
* This function drops the device-level runtime pm reference obtained by
- * intel_runtime_pm_get() and might power down the corresponding
+ * intel_runtime_pm_get_raw() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
+void
+intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
-
- untrack_intel_runtime_pm_wakeref(i915);
+ __intel_runtime_pm_put(rpm, wref, false);
+}
- pm_runtime_mark_last_busy(kdev);
- pm_runtime_put_autosuspend(kdev);
+/**
+ * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
+ * @rpm: the intel_runtime_pm structure
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_runtime_pm_put() instead.
+ */
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
+{
+ __intel_runtime_pm_put(rpm, -1, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @rpm: the intel_runtime_pm structure
+ * @wref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- cancel_intel_runtime_pm_wakeref(i915, wref);
- intel_runtime_pm_put_unchecked(i915);
+ __intel_runtime_pm_put(rpm, wref, true);
}
#endif
/**
* intel_runtime_pm_enable - enable runtime pm
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function enables runtime pm at the end of the driver load sequence.
*
@@ -4444,10 +539,9 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
* subordinate display power domains. That is done by
* intel_power_domains_enable().
*/
-void intel_runtime_pm_enable(struct drm_i915_private *i915)
+void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/*
* Disable the system suspend direct complete optimization, which can
@@ -4468,7 +562,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(i915)) {
+ if (!rpm->available) {
int ret;
pm_runtime_dont_use_autosuspend(kdev);
@@ -4486,10 +580,9 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915)
pm_runtime_put_autosuspend(kdev);
}
-void intel_runtime_pm_disable(struct drm_i915_private *i915)
+void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
WARN(pm_runtime_get_sync(kdev) < 0,
@@ -4497,24 +590,31 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915)
pm_runtime_dont_use_autosuspend(kdev);
- if (!HAS_RUNTIME_PM(i915))
+ if (!rpm->available)
pm_runtime_put(kdev);
}
-void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
- int count;
+ int count = atomic_read(&rpm->wakeref_count);
- count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
WARN(count,
- "i915->runtime_pm.wakeref_count=%d on cleanup\n",
- count);
+ "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ intel_rpm_raw_wakeref_count(count),
+ intel_rpm_wakelock_count(count));
- untrack_intel_runtime_pm_wakeref(i915);
+ untrack_all_intel_runtime_pm_wakerefs(rpm);
}
-void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
- init_intel_runtime_pm_wakeref(i915);
+ struct drm_i915_private *i915 =
+ container_of(rpm, struct drm_i915_private, runtime_pm);
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+
+ rpm->kdev = kdev;
+ rpm->available = HAS_RUNTIME_PM(i915);
+
+ init_intel_runtime_pm_wakeref(rpm);
}
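
All of these entry points now take the intel_runtime_pm structure directly rather than the full drm_i915_private. A minimal sketch of how the wakelock and raw flavours are meant to pair up (the i915 pointer and the elided HW work are illustrative):

        intel_wakeref_t wf;

        /* wakelock reference: powers the device up for HW access */
        wf = intel_runtime_pm_get(&i915->runtime_pm);
        /* ... MMIO / GTT / GT work ... */
        intel_runtime_pm_put(&i915->runtime_pm, wf);

        /* raw reference: pins runtime pm without claiming HW access */
        wf = intel_runtime_pm_get_raw(&i915->runtime_pm);
        /* ... */
        intel_runtime_pm_put_raw(&i915->runtime_pm, wf);

Each get must be balanced by the put of the same flavour, since the two are accounted separately in the split wakeref counter introduced by the new header below.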
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
new file mode 100644
index 000000000000..473c4850c01d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RUNTIME_PM_H__
+#define __INTEL_RUNTIME_PM_H__
+
+#include <linux/types.h>
+
+#include "display/intel_display.h"
+
+#include "intel_wakeref.h"
+
+#include "i915_utils.h"
+
+struct device;
+struct drm_i915_private;
+struct drm_printer;
+
+enum i915_drm_suspend_mode {
+ I915_DRM_SUSPEND_IDLE,
+ I915_DRM_SUSPEND_MEM,
+ I915_DRM_SUSPEND_HIBERNATE,
+};
+
+/*
+ * This struct helps track the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, not even register access, so we don't get interrupts
+ * or anything else.
+ *
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
+ *
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay with zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
+ * it can be changed with the standard runtime PM files from sysfs.
+ *
+ * The irqs_enabled variable becomes false exactly after we disable the IRQs and
+ * goes back to true exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen and we'll print some error messages in
+ * case it happens.
+ *
+ * For more, read Documentation/power/runtime_pm.txt.
+ */
+struct intel_runtime_pm {
+ atomic_t wakeref_count;
+ struct device *kdev; /* points to i915->drm.pdev->dev */
+ bool available;
+ bool suspended;
+ bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ /*
+ * To aid detection of wakeref leaks and general misuse, we
+ * track all wakeref holders. With manual markup (i.e. returning
+ * a cookie to each rpm_get caller which they then supply to their
+ * paired rpm_put) we can remove the corresponding pairs and keep
+ * the array trimmed to active wakerefs.
+ */
+ struct intel_runtime_pm_debug {
+ spinlock_t lock;
+
+ depot_stack_handle_t last_acquire;
+ depot_stack_handle_t last_release;
+
+ depot_stack_handle_t *owners;
+ unsigned long count;
+ } debug;
+#endif
+};
+
+#define BITS_PER_WAKEREF \
+ BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
+#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2)
+#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT)
+#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1)
+
+static inline int
+intel_rpm_raw_wakeref_count(int wakeref_count)
+{
+ return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
+}
+
+static inline int
+intel_rpm_wakelock_count(int wakeref_count)
+{
+ return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
+}
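
Both counts are packed into the single wakeref_count atomic: raw wakerefs occupy the low half and wakelocks the high half. A worked example, assuming the usual 32-bit atomic_t so that INTEL_RPM_WAKELOCK_SHIFT is 16 (the count value is illustrative):

        /* one raw wakeref and two wakelocks currently held */
        int count = 0x00020001;

        intel_rpm_raw_wakeref_count(count); /* 0x00020001 & 0xffff == 1 */
        intel_rpm_wakelock_count(count);    /* 0x00020001 >> 16   == 2 */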
+
+static inline void
+assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
+{
+ WARN_ONCE(rpm->suspended,
+ "Device suspended during HW access\n");
+}
+
+static inline void
+__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
+{
+ assert_rpm_device_not_suspended(rpm);
+ WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
+ "RPM raw-wakeref not held\n");
+}
+
+static inline void
+__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
+{
+ __assert_rpm_raw_wakeref_held(rpm, wakeref_count);
+ WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
+ "RPM wakelock ref not held during HW access\n");
+}
+
+static inline void
+assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
+{
+ __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
+}
+
+static inline void
+assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
+{
+ __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
+}
+
+/**
+ * disable_rpm_wakeref_asserts - disable the RPM assert checks
+ * @rpm: the intel_runtime_pm structure
+ *
+ * This function disables the asserts that check whether we hold an RPM
+ * wakelock reference, while keeping the device-not-suspended checks enabled.
+ * It's meant to be used only in special circumstances where our rule about
+ * the wakelock refcount wrt. the device power state doesn't hold. According
+ * to this rule at any point where we access the HW or want to keep the HW in
+ * an active state we must hold an RPM wakelock reference acquired via one of
+ * the intel_runtime_pm_get() helpers. Currently there are a few special spots
+ * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
+ * forcewake release timer, and the GPU RPS and hangcheck works. All other
+ * users should avoid using this function.
+ *
+ * Any calls to this function must have a symmetric call to
+ * enable_rpm_wakeref_asserts().
+ */
+static inline void
+disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
+{
+ atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &rpm->wakeref_count);
+}
+
+/**
+ * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
+ * @rpm: the intel_runtime_pm structure
+ *
+ * This function re-enables the RPM assert checks after disabling them with
+ * disable_rpm_wakeref_asserts. It's meant to be used only in special
+ * circumstances otherwise its use should be avoided.
+ *
+ * Any calls to this function must have a symmetric call to
+ * disable_rpm_wakeref_asserts().
+ */
+static inline void
+enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
+{
+ atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &rpm->wakeref_count);
+}
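
The disable/enable pair works by biasing both halves of the packed counter in one atomic operation: adding INTEL_RPM_WAKELOCK_BIAS + 1 raises the wakelock count and the raw count by one each, so both assert helpers see non-zero counts until the matching enable subtracts the bias again. A sketch of the intended bracketing:

        disable_rpm_wakeref_asserts(rpm);
        /* IRQ or suspend/resume path: HW access without a tracked wakeref */
        assert_rpm_wakelock_held(rpm); /* passes, thanks to the bias */
        enable_rpm_wakeref_asserts(rpm);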
+
+void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
+
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
+
+#define with_intel_runtime_pm(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
+#define with_intel_runtime_pm_if_in_use(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
+#else
+static inline void
+intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
+{
+ intel_runtime_pm_put_unchecked(rpm);
+}
+#endif
+void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
+ struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
+ struct drm_printer *p)
+{
+}
+#endif
+
+#endif /* __INTEL_RUNTIME_PM_H__ */
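
The with_intel_runtime_pm() helper expands to a for-loop that acquires the wakeref before the first iteration and releases it in the loop's increment expression, so the get/put pairing is visually scoped to the block. A usage sketch (the register write is illustrative):

        intel_wakeref_t wakeref;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                /* HW access; the wakeref is released when the block exits */
                intel_uncore_write(&i915->uncore, GEN6_RP_CONTROL, 0);
        }

Note the body must not break out of the implicit loop early: the intel_runtime_pm_put() in the increment expression would be skipped and the wakeref leaked.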
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 57de41b1f989..a115625e980c 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -22,6 +22,10 @@
*
*/
+#include <asm/iosf_mbi.h>
+
+#include "intel_sideband.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
@@ -39,19 +43,68 @@
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP 0x07
-static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
- u32 port, u32 opcode, u32 addr, u32 *val)
+static void ping(void *info)
+{
+}
+
+static void __vlv_punit_get(struct drm_i915_private *i915)
+{
+ iosf_mbi_punit_acquire();
+
+ /*
+ * Prevent the cpu from sleeping while we use this sideband, otherwise
+ * the punit may cause a machine hang. The issue appears to be isolated
+ * with changing the power state of the CPU package while changing
+ * the power state via the punit, and we have only observed it
+ * reliably on 4-core Baytrail systems, suggesting the issue is in the
+ * power delivery mechanism and likely to be board/function
+ * specific. Hence we presume the workaround need only be applied
+ * to the Valleyview P-unit and not all sideband communications.
+ */
+ if (IS_VALLEYVIEW(i915)) {
+ pm_qos_update_request(&i915->sb_qos, 0);
+ on_each_cpu(ping, NULL, 1);
+ }
+}
+
+static void __vlv_punit_put(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+
+ iosf_mbi_punit_release();
+}
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
- u32 cmd, be = 0xf, bar = 0;
- bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_get(i915);
- cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
- (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
- (bar << IOSF_BAR_SHIFT);
+ mutex_lock(&i915->sb_lock);
+}
+
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+ mutex_unlock(&i915->sb_lock);
+
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_put(i915);
+}
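
vlv_iosf_sb_get()/vlv_iosf_sb_put() let a caller hold several sideband ports across a single sb_lock critical section by passing a bitmask. A sketch of a multi-port transaction (port and register choice is illustrative):

        vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
        val = vlv_cck_read(i915, reg);
        vlv_punit_write(i915, addr, val);
        vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

Only the PUNIT bit does more than take sb_lock: it also acquires the iosf_mbi punit side and applies the Valleyview pm_qos workaround above.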
+
+static int vlv_sideband_rw(struct drm_i915_private *i915,
+ u32 devfn, u32 port, u32 opcode,
+ u32 addr, u32 *val)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ int err;
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+ lockdep_assert_held(&i915->sb_lock);
+ if (port == IOSF_PORT_PUNIT)
+ iosf_mbi_assert_punit_acquired();
- if (intel_wait_for_register(&dev_priv->uncore,
+ /* Flush the previous comms, just in case it failed last time. */
+ if (intel_wait_for_register(uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -59,131 +112,132 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return -EAGAIN;
}
- I915_WRITE(VLV_IOSF_ADDR, addr);
- I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
- I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 5)) {
+ preempt_disable();
+
+ intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
+ (devfn << IOSF_DEVFN_SHIFT) |
+ (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) |
+ (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+ (0 << IOSF_BAR_SHIFT) |
+ IOSF_SB_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 10000, 0, NULL) == 0) {
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
+ err = 0;
+ } else {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
}
- if (is_read)
- *val = I915_READ(VLV_IOSF_DATA);
+ preempt_enable();
- return 0;
+ return err;
}
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
return val;
}
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
- int err;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
-
- return err;
+ return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
}
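
With the locking hoisted out of the accessors, punit callers now bracket their accesses themselves, typically via the vlv_punit_get()/vlv_punit_put() wrappers from the new intel_sideband.h. A sketch (the register is a real punit register; the surrounding flow is illustrative):

        vlv_punit_get(i915);
        val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
        vlv_punit_put(i915);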
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
return val;
}
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
return val;
}
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
u8 port, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
+ int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- SB_MRD_NP, reg, &val);
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
/*
* FIXME: There might be some registers where all 1's is a valid value,
@@ -195,101 +249,286 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
return val;
}
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val)
+{
+ int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- SB_MWR_NP, reg, &val);
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
}
/* SBI access */
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
- enum intel_sbi_destination destination)
+static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
{
- u32 value = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 cmd;
+
+ lockdep_assert_held(&i915->sb_lock);
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
+ if (intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- return 0;
+ return -EBUSY;
}
- I915_WRITE(SBI_ADDR, (reg << 16));
- I915_WRITE(SBI_DATA, 0);
+ intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
+ intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
if (destination == SBI_ICLK)
- value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
- value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT,
- SBI_BUSY,
- 0,
- 100)) {
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= BIT(8);
+ intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100, 100, &cmd)) {
DRM_ERROR("timeout waiting for SBI to complete read\n");
- return 0;
+ return -ETIMEDOUT;
}
- if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
+ if (cmd & SBI_RESPONSE_FAIL) {
DRM_ERROR("error during SBI read of reg %x\n", reg);
- return 0;
+ return -ENXIO;
}
- return I915_READ(SBI_DATA);
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, SBI_DATA);
+
+ return 0;
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(i915, reg, destination, &result, true);
+
+ return result;
}
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
- u32 tmp;
+ intel_sbi_rw(i915, reg, destination, &value, false);
+}
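
Both SBI entry points still require sb_lock to be held by the caller, which the consolidated helper now asserts via lockdep. A read-modify-write sketch in the style of the existing iCLK users (the register and bit exist in the driver; the sequence is illustrative):

        mutex_lock(&i915->sb_lock);
        tmp = intel_sbi_read(i915, SBI_SSCCTL, SBI_ICLK);
        tmp |= SBI_SSCCTL_DISABLE;
        intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
        mutex_unlock(&i915->sb_lock);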
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+static inline int gen6_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ return -ENODEV;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- DRM_ERROR("timeout waiting for SBI to become ready\n");
- return;
+static inline int gen7_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
}
+}
- I915_WRITE(SBI_ADDR, (reg << 16));
- I915_WRITE(SBI_DATA, value);
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+ u32 mbox, u32 *val, u32 *val1,
+ int fast_timeout_us,
+ int slow_timeout_ms,
+ bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
- if (destination == SBI_ICLK)
- tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ lockdep_assert_held(&i915->sb_lock);
+
+ /*
+	 * GEN6_PCODE_* are outside of the forcewake domain, so we can
+	 * use the _fw register accessors to reduce the amount of work
+	 * required when reading/writing.
+ */
+
+ if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+ return -EAGAIN;
+
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
+ intel_uncore_write_fw(uncore,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ fast_timeout_us,
+ slow_timeout_ms,
+ &mbox))
+ return -ETIMEDOUT;
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+ if (is_read && val1)
+ *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
+
+ if (INTEL_GEN(i915) > 6)
+ return gen7_check_mailbox_status(mbox);
else
- tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
- I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT,
- SBI_BUSY,
- 0,
- 100)) {
- DRM_ERROR("timeout waiting for SBI to complete write\n");
- return;
+ return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, val, val1,
+ 500, 0,
+ true);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
}
- if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
- DRM_ERROR("error during SBI write of %x to reg %x\n",
- value, reg);
- return;
+ return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+ u32 mbox, u32 val,
+ int fast_timeout_us,
+ int slow_timeout_ms)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
+ fast_timeout_us, slow_timeout_ms,
+ false);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
}
+
+ return err;
}
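
The callers' view is unchanged: a read fills one or two data dwords, a write sends one, and both now serialize on sb_lock internally. A sketch of a read (the mailbox is a real pcode constant; the flow is illustrative):

        u32 params = 0;
        int err;

        err = sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL);
        if (err)
                return err;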
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
{
- u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
+ *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
+ 500, 0,
+ true);
+
+ return *status || ((request & reply_mask) == reply);
}
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if this times out, for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
+ u32 status;
+ int ret;
+
+ mutex_lock(&i915->sb_lock);
+
+#define COND \
+ skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee how soon its passed condition is first
+	 * evaluated, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 50);
+ preempt_enable();
+
+out:
+ mutex_unlock(&i915->sb_lock);
+ return ret ? ret : status;
+#undef COND
}
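
A representative call, mirroring how the cdclk code uses this helper to ask pcode to prepare for a frequency change (the constants are real pcode definitions; the surrounding flow is illustrative):

        ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE, 3);
        if (ret)
                DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret);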
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
new file mode 100644
index 000000000000..7fb95745a444
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _INTEL_SIDEBAND_H_
+#define _INTEL_SIDEBAND_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+enum pipe;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+enum {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
+ u8 port, u32 reg, u32 val);
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
+
+static inline void vlv_bunit_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_bunit_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline void vlv_cck_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+}
+
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg);
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_cck_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline void vlv_ccu_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCU));
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_ccu_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline void vlv_dpio_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val);
+
+static inline void vlv_dpio_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_DPIO));
+}
+
+static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline void vlv_nc_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_NC));
+}
+
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr);
+
+static inline void vlv_nc_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline void vlv_punit_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr);
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val);
+
+static inline void vlv_punit_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+ u32 val, int fast_timeout_us,
+ int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val) \
+ sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
+#endif /* _INTEL_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 25b80ffe71ad..ae45651ac73c 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -22,11 +22,12 @@
*
*/
+#include "gt/intel_reset.h"
#include "intel_uc.h"
-#include "intel_guc_submission.h"
#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
-#include "i915_reset.h"
static void guc_free_load_err_log(struct intel_guc *guc);
@@ -57,10 +58,8 @@ static int __get_platform_enable_guc(struct drm_i915_private *i915)
struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
- /* Default is to enable GuC/HuC if we know their firmwares */
- if (intel_uc_fw_is_selected(guc_fw))
- enable_guc |= ENABLE_GUC_SUBMISSION;
- if (intel_uc_fw_is_selected(huc_fw))
+ /* Default is to use HuC if we know GuC and HuC firmwares */
+ if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
enable_guc |= ENABLE_GUC_LOAD_HUC;
/* Any platform specific fine-tuning can be done here */
@@ -132,6 +131,15 @@ static void sanitize_options_early(struct drm_i915_private *i915)
"no HuC firmware");
}
+ /* XXX: GuC submission is unavailable for now */
+ if (intel_uc_is_using_guc_submission(i915)) {
+ DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
+ "enable_guc", i915_modparams.enable_guc,
+ "GuC submission not supported");
+ DRM_INFO("Switching to non-GuC submission mode!\n");
+ i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
+ }
+
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
@@ -210,28 +218,41 @@ static void guc_free_load_err_log(struct intel_guc *guc)
i915_gem_object_put(guc->load_err_log);
}
+static void guc_reset_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.reset(guc_to_i915(guc));
+}
+
+static void guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc_to_i915(guc));
+}
+
+static void guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc_to_i915(guc));
+}
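
These wrappers route through a guc->interrupts vtable rather than calling the gen9 helpers directly, keeping this file generation-agnostic. The wiring itself is not part of this hunk; presumably it happens during early GuC init, along the lines of (hypothetical placement, using the helpers this patch stops calling directly):

        guc->interrupts.reset = gen9_reset_guc_interrupts;
        guc->interrupts.enable = gen9_enable_guc_interrupts;
        guc->interrupts.disable = gen9_disable_guc_interrupts;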
+
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ guc_enable_interrupts(guc);
- gen9_enable_guc_interrupts(i915);
+ return intel_guc_ct_enable(&guc->ct);
+}
- if (HAS_GUC_CT(i915))
- return intel_guc_ct_enable(&guc->ct);
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
- guc->send = intel_guc_send_mmio;
- guc->handler = intel_guc_to_host_event_handler_mmio;
- return 0;
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
}
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ intel_guc_ct_disable(&guc->ct);
- if (HAS_GUC_CT(i915))
- intel_guc_ct_disable(&guc->ct);
-
- gen9_disable_guc_interrupts(i915);
+ guc_disable_interrupts(guc);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
@@ -280,6 +301,7 @@ void intel_uc_fini_misc(struct drm_i915_private *i915)
int intel_uc_init(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret;
if (!USES_GUC(i915))
@@ -288,23 +310,37 @@ int intel_uc_init(struct drm_i915_private *i915)
if (!HAS_GUC(i915))
return -ENODEV;
+ /* XXX: GuC submission is unavailable for now */
+ GEM_BUG_ON(USES_GUC_SUBMISSION(i915));
+
ret = intel_guc_init(guc);
if (ret)
return ret;
+ if (USES_HUC(i915)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto err_guc;
+ }
+
if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc);
- if (ret) {
- intel_guc_fini(guc);
- return ret;
- }
+ if (ret)
+ goto err_huc;
}
return 0;
+
+err_huc:
+ if (USES_HUC(i915))
+ intel_huc_fini(huc);
+err_guc:
+ intel_guc_fini(guc);
+ return ret;
}
void intel_uc_fini(struct drm_i915_private *i915)
@@ -319,17 +355,17 @@ void intel_uc_fini(struct drm_i915_private *i915)
if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
+ if (USES_HUC(i915))
+ intel_huc_fini(&i915->huc);
+
intel_guc_fini(guc);
}
-void intel_uc_sanitize(struct drm_i915_private *i915)
+static void __uc_sanitize(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
struct intel_huc *huc = &i915->huc;
- if (!USES_GUC(i915))
- return;
-
GEM_BUG_ON(!HAS_GUC(i915));
intel_huc_sanitize(huc);
@@ -338,6 +374,14 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
+void intel_uc_sanitize(struct drm_i915_private *i915)
+{
+ if (!USES_GUC(i915))
+ return;
+
+ __uc_sanitize(i915);
+}
+
int intel_uc_init_hw(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
@@ -349,7 +393,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(i915);
+ guc_reset_interrupts(guc);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
@@ -373,6 +417,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
goto err_out;
}
+ intel_guc_ads_reset(guc);
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0)
@@ -396,14 +441,14 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
goto err_communication;
}
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
+
if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
- } else if (INTEL_GEN(i915) < 11) {
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
}
dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
@@ -423,6 +468,8 @@ err_communication:
err_log_capture:
guc_capture_load_err_log(guc);
err_out:
+ __uc_sanitize(i915);
+
/*
* Note that there is no fallback as either user explicitly asked for
* the GuC or driver default option was to run with the GuC enabled.
@@ -438,7 +485,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(i915))
+ if (!intel_guc_is_loaded(guc))
return;
GEM_BUG_ON(!HAS_GUC(i915));
@@ -447,6 +494,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
+ __uc_sanitize(i915);
}
/**
@@ -459,33 +507,38 @@ void intel_uc_reset_prepare(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(i915))
+ if (!intel_guc_is_loaded(guc))
return;
- guc_disable_communication(guc);
- intel_uc_sanitize(i915);
+ guc_stop_communication(guc);
+ __uc_sanitize(i915);
}
-int intel_uc_suspend(struct drm_i915_private *i915)
+void intel_uc_runtime_suspend(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
int err;
- if (!USES_GUC(i915))
- return 0;
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
+ if (!intel_guc_is_loaded(guc))
+ return;
err = intel_guc_suspend(guc);
- if (err) {
+ if (err)
DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
- return err;
- }
guc_disable_communication(guc);
+}
- return 0;
+void intel_uc_suspend(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_loaded(guc))
+ return;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ intel_uc_runtime_suspend(i915);
}
int intel_uc_resume(struct drm_i915_private *i915)
@@ -493,10 +546,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
struct intel_guc *guc = &i915->guc;
int err;
- if (!USES_GUC(i915))
- return 0;
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ if (!intel_guc_is_loaded(guc))
return 0;
guc_enable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index c14729786652..3ea06c87dfcd 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -39,7 +39,8 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
void intel_uc_reset_prepare(struct drm_i915_private *i915);
-int intel_uc_suspend(struct drm_i915_private *dev_priv);
+void intel_uc_suspend(struct drm_i915_private *i915);
+void intel_uc_runtime_suspend(struct drm_i915_private *i915);
int intel_uc_resume(struct drm_i915_private *dev_priv);
static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index becf05ebae4d..f342ddd47df8 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <drm/drm_print.h>
@@ -119,21 +120,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail;
}
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
+ /* Get version numbers from the CSS header */
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
+ css->sw_version);
break;
case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
+ css->sw_version);
break;
default:
@@ -159,7 +159,8 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail;
}
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ obj = i915_gem_object_create_shmem_from_data(dev_priv,
+ fw->data, fw->size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
@@ -191,6 +192,35 @@ fail:
release_firmware(fw); /* OK even if fw is NULL */
}
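
Earlier in this file's diff, the version parsing switched to FIELD_GET(), which extracts and right-shifts a field described by a mask in one step instead of open-coded shifts. A self-contained sketch (the mask layout is assumed for illustration; the real CSS_SW_VERSION_* masks come from the firmware CSS header definitions):

        #include <linux/bitfield.h>

        #define SW_VERSION_MAJOR GENMASK(31, 16) /* assumed layout */
        #define SW_VERSION_MINOR GENMASK(15, 0)  /* assumed layout */

        u32 sw_version = 0x00090021; /* encodes version 9.33 */
        u16 major = FIELD_GET(SW_VERSION_MAJOR, sw_version); /* 9 */
        u16 minor = FIELD_GET(SW_VERSION_MINOR, sw_version); /* 33 */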
+static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ struct i915_vma dummy = {
+ .node.start = intel_uc_fw_ggtt_offset(uc_fw),
+ .node.size = obj->base.size,
+ .pages = obj->mm.pages,
+ .vm = &ggtt->vm,
+ };
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ drm_clflush_sg(dummy.pages);
+
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+}
+
+static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ u64 start = intel_uc_fw_ggtt_offset(uc_fw);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
@@ -201,11 +231,8 @@ fail:
* Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw,
- struct i915_vma *vma))
+ int (*xfer)(struct intel_uc_fw *uc_fw))
{
- struct i915_vma *vma;
- u32 ggtt_pin_bias;
int err;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -219,33 +246,10 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type),
intel_uc_fw_status_repr(uc_fw->load_status));
- /* Pin object with firmware */
- err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
- if (err) {
- DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
- vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | ggtt_pin_bias);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
/* Call custom loader */
- err = xfer(uc_fw, vma);
-
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
+ intel_uc_fw_ggtt_bind(uc_fw);
+ err = xfer(uc_fw);
+ intel_uc_fw_ggtt_unbind(uc_fw);
if (err)
goto fail;
@@ -273,14 +277,50 @@ fail:
return err;
}
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages(uc_fw->obj);
+ if (err)
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return;
+
+ i915_gem_object_unpin_pages(uc_fw->obj);
+}
+
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
/**
- * intel_uc_fw_fini - cleanup uC firmware
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
*
* @uc_fw: uC firmware
*
* Cleans up uC firmware by releasing the firmware GEM obj.
*/
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj;
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 0e3bd580e267..ff98f8661d72 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -27,7 +27,6 @@
struct drm_printer;
struct drm_i915_private;
-struct i915_vma;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
@@ -102,7 +101,8 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
}
static inline
-void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type)
{
uc_fw->path = NULL;
uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
@@ -144,10 +144,12 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw,
- struct i915_vma *vma));
+ int (*xfer)(struct intel_uc_fw *uc_fw));
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d1d51e1121e2..da33aa672c3d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -111,9 +111,11 @@ wait_ack_set(const struct intel_uncore_forcewake_domain *d,
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_clear(d, FORCEWAKE_KERNEL))
+ if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ }
}
enum ack_type {
@@ -186,9 +188,11 @@ fw_domain_get(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_set(d, FORCEWAKE_KERNEL))
+ if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ }
}
static inline void
@@ -579,7 +583,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
if (!uncore->funcs.force_wake_get)
return;
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
spin_lock_irqsave(&uncore->lock, irqflags);
__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -733,7 +737,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (!uncore->funcs.force_wake_get)
return;
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
WARN(fw_domains & ~uncore->fw_domains_active,
@@ -1050,7 +1054,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1092,7 +1096,7 @@ __gen2_read(64)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
unclaimed_reg_debug(uncore, reg, true, true)
@@ -1166,7 +1170,7 @@ __gen6_read(64)
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
#define GEN2_WRITE_FOOTER
@@ -1204,7 +1208,7 @@ __gen2_write(32)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
unclaimed_reg_debug(uncore, reg, false, true)
@@ -1457,8 +1461,8 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct drm_i915_private *dev_priv = container_of(nb,
- struct drm_i915_private, uncore.pmic_bus_access_nb);
+ struct intel_uncore *uncore = container_of(nb,
+ struct intel_uncore, pmic_bus_access_nb);
switch (action) {
case MBI_PMIC_BUS_ACCESS_BEGIN:
@@ -1475,12 +1479,12 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* wake reference -> disable wakeref asserts for the time of
* the access.
*/
- disable_rpm_wakeref_asserts(dev_priv);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- enable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(uncore->rpm);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(uncore->rpm);
break;
case MBI_PMIC_BUS_ACCESS_END:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
break;
}
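With the notifier retargeted at struct intel_uncore, container_of() needs
only the embedded notifier_block to recover the uncore, so the callback no
longer depends on drm_i915_private. For context, a minimal sketch of the
registration site, using the existing iosf_mbi API (the exact placement in
intel_uncore.c is elided here):

	uncore->pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);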
@@ -1668,7 +1672,8 @@ static const struct reg_whitelist {
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_uncore *uncore = &i915->uncore;
struct drm_i915_reg_read *reg = data;
struct reg_whitelist const *entry;
intel_wakeref_t wakeref;
@@ -1685,7 +1690,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
GEM_BUG_ON(entry->size > 8);
GEM_BUG_ON(entry_offset & (entry->size - 1));
- if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
+ if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
entry_offset == (reg->offset & -entry->size))
break;
entry++;
@@ -1697,18 +1702,22 @@ int i915_reg_read_ioctl(struct drm_device *dev,
flags = reg->offset & (entry->size - 1);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
- reg->val = I915_READ64_2x32(entry->offset_ldw,
- entry->offset_udw);
+ reg->val = intel_uncore_read64_2x32(uncore,
+ entry->offset_ldw,
+ entry->offset_udw);
else if (entry->size == 8 && flags == 0)
- reg->val = I915_READ64(entry->offset_ldw);
+ reg->val = intel_uncore_read64(uncore,
+ entry->offset_ldw);
else if (entry->size == 4 && flags == 0)
- reg->val = I915_READ(entry->offset_ldw);
+ reg->val = intel_uncore_read(uncore, entry->offset_ldw);
else if (entry->size == 2 && flags == 0)
- reg->val = I915_READ16(entry->offset_ldw);
+ reg->val = intel_uncore_read16(uncore,
+ entry->offset_ldw);
else if (entry->size == 1 && flags == 0)
- reg->val = I915_READ8(entry->offset_ldw);
+ reg->val = intel_uncore_read8(uncore,
+ entry->offset_ldw);
else
ret = -EINVAL;
}
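This path is reachable from userspace through DRM_IOCTL_I915_REG_READ. A
hedged sketch of a caller, assuming an open DRM fd and a whitelisted
register offset (the helper name is invented for illustration):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Hypothetical helper: read a whitelisted register via the ioctl. */
	static int i915_read_reg(int drm_fd, uint64_t offset, uint64_t *val)
	{
		struct drm_i915_reg_read rr = {
			/* low offset bits carry flags, e.g. I915_REG_READ_8B_WA */
			.offset = offset,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr))
			return -1;

		*val = rr.val;
		return 0;
	}

The kernel side above then dispatches to the intel_uncore_read*() family
according to the whitelist entry's size and those flag bits.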
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index d6af3de70121..804a0faacc91 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -33,7 +33,7 @@
#include "i915_reg.h"
struct drm_i915_private;
-struct i915_runtime_pm;
+struct intel_runtime_pm;
struct intel_uncore;
enum forcewake_domain_id {
@@ -97,7 +97,7 @@ struct intel_forcewake_range {
struct intel_uncore {
void __iomem *regs;
- struct i915_runtime_pm *rpm;
+ struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
new file mode 100644
index 000000000000..3db6fa682823
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -0,0 +1,138 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_runtime_pm.h"
+#include "i915_gem.h"
+
+static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+{
+ wf->wakeref = intel_runtime_pm_get(rpm);
+}
+
+static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+{
+ intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
+
+ intel_runtime_pm_put(rpm, wakeref);
+ GEM_BUG_ON(!wakeref);
+}
+
+int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ /*
+ * Treat get/put as different subclasses, as we may need to run
+ * the put callback from under the shrinker and do not want to
+ * cross-contaminate that callback with any extra work performed
+ * upon acquiring the wakeref.
+ */
+ mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
+ if (!atomic_read(&wf->count)) {
+ int err;
+
+ rpm_get(rpm, wf);
+
+ err = fn(wf);
+ if (unlikely(err)) {
+ rpm_put(rpm, wf);
+ mutex_unlock(&wf->mutex);
+ return err;
+ }
+
+ smp_mb__before_atomic(); /* release wf->count */
+ }
+ atomic_inc(&wf->count);
+ mutex_unlock(&wf->mutex);
+
+ return 0;
+}
+
+int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ int err;
+
+ err = fn(wf);
+ if (likely(!err))
+ rpm_put(rpm, wf);
+ else
+ atomic_inc(&wf->count);
+ mutex_unlock(&wf->mutex);
+
+ return err;
+}
+
+void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+{
+ __mutex_init(&wf->mutex, "wakeref", key);
+ atomic_set(&wf->count, 0);
+ wf->wakeref = 0;
+}
+
+static void wakeref_auto_timeout(struct timer_list *t)
+{
+ struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
+ intel_wakeref_t wakeref;
+ unsigned long flags;
+
+ if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
+ return;
+
+ wakeref = fetch_and_zero(&wf->wakeref);
+ spin_unlock_irqrestore(&wf->lock, flags);
+
+ intel_runtime_pm_put(wf->rpm, wakeref);
+}
+
+void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
+ struct intel_runtime_pm *rpm)
+{
+ spin_lock_init(&wf->lock);
+ timer_setup(&wf->timer, wakeref_auto_timeout, 0);
+ refcount_set(&wf->count, 0);
+ wf->wakeref = 0;
+ wf->rpm = rpm;
+}
+
+void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
+{
+ unsigned long flags;
+
+ if (!timeout) {
+ if (del_timer_sync(&wf->timer))
+ wakeref_auto_timeout(&wf->timer);
+ return;
+ }
+
+ /* Our mission is only to extend an already active wakeref */
+ assert_rpm_wakelock_held(wf->rpm);
+
+ if (!refcount_inc_not_zero(&wf->count)) {
+ spin_lock_irqsave(&wf->lock, flags);
+ if (!refcount_inc_not_zero(&wf->count)) {
+ GEM_BUG_ON(wf->wakeref);
+ wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
+ refcount_set(&wf->count, 1);
+ }
+ spin_unlock_irqrestore(&wf->lock, flags);
+ }
+
+ /*
+ * If we extend a pending timer, we will only get a single timer
+ * callback and so need to cancel the local inc by running the
+ * elided callback to keep the wf->count balanced.
+ */
+ if (mod_timer(&wf->timer, jiffies + timeout))
+ wakeref_auto_timeout(&wf->timer);
+}
+
+void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
+{
+ intel_wakeref_auto(wf, 0);
+ GEM_BUG_ON(wf->wakeref);
+}
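The shape implemented above is an atomic fast path (wf->count already
non-zero) plus a mutex-serialised slow path for the first get and the last
put, with the callbacks run under wf->mutex while the runtime-pm wakeref is
held. A usage sketch under invented names (not a real i915 consumer):

	struct my_unit {
		struct intel_wakeref wakeref;
	};

	static int my_unit_wake(struct intel_wakeref *wf)
	{
		/* first get: do power-up work; an error unwinds the get */
		return 0;
	}

	static int my_unit_sleep(struct intel_wakeref *wf)
	{
		/* last put: do power-down work; an error keeps the ref */
		return 0;
	}

	static int my_unit_get(struct intel_runtime_pm *rpm, struct my_unit *u)
	{
		return intel_wakeref_get(rpm, &u->wakeref, my_unit_wake);
	}

	static int my_unit_put(struct intel_runtime_pm *rpm, struct my_unit *u)
	{
		return intel_wakeref_put(rpm, &u->wakeref, my_unit_sleep);
	}

intel_wakeref_init(&u->wakeref) must run once before the first get; the
get/put entry points themselves are defined in intel_wakeref.h below.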
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
new file mode 100644
index 000000000000..9cbb2ebf575b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -0,0 +1,164 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_WAKEREF_H
+#define INTEL_WAKEREF_H
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/stackdepot.h>
+#include <linux/timer.h>
+
+struct intel_runtime_pm;
+
+typedef depot_stack_handle_t intel_wakeref_t;
+
+struct intel_wakeref {
+ atomic_t count;
+ struct mutex mutex;
+ intel_wakeref_t wakeref;
+};
+
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct lock_class_key *key);
+#define intel_wakeref_init(wf) do { \
+ static struct lock_class_key __key; \
+ \
+ __intel_wakeref_init((wf), &__key); \
+} while (0)
+
+int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf));
+
+/**
+ * intel_wakeref_get: Acquire the wakeref
+ * @rpm: the intel_runtime_pm structure
+ * @wf: the wakeref
+ * @fn: callback for acquiring the wakeref, called only on first acquire.
+ *
+ * Acquire a hold on the wakeref. The first user to do so will acquire
+ * the runtime pm wakeref and then call the @fn underneath the wakeref
+ * mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * will be released and the acquisition unwound, and an error reported.
+ *
+ * Returns: 0 if the wakeref was acquired successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_get(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ if (unlikely(!atomic_inc_not_zero(&wf->count)))
+ return __intel_wakeref_get_first(rpm, wf, fn);
+
+ return 0;
+}
+
+/**
+ * intel_wakeref_put: Release the wakeref
+ * @rpm: the intel_runtime_pm structure
+ * @wf: the wakeref
+ * @fn: callback for releasing the wakeref, called only on final release.
+ *
+ * Release our hold on the wakeref. When there are no more users,
+ * the runtime pm wakeref will be released after the @fn callback is called
+ * underneath the wakeref mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * is retained and an error reported.
+ *
+ * Returns: 0 if the wakeref was released successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_put(struct intel_runtime_pm *rpm,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
+ return __intel_wakeref_put_last(rpm, wf, fn);
+
+ return 0;
+}
+
+/**
+ * intel_wakeref_lock: Lock the wakeref (mutex)
+ * @wf: the wakeref
+ *
+ * Locks the wakeref to prevent it being acquired or released. New users
+ * can still adjust the counter, but the wakeref itself (and callback)
+ * cannot be acquired or released.
+ */
+static inline void
+intel_wakeref_lock(struct intel_wakeref *wf)
+ __acquires(wf->mutex)
+{
+ mutex_lock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_unlock: Unlock the wakeref
+ * @wf: the wakeref
+ *
+ * Releases a previously acquired intel_wakeref_lock().
+ */
+static inline void
+intel_wakeref_unlock(struct intel_wakeref *wf)
+ __releases(wf->mutex)
+{
+ mutex_unlock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_active: Query whether the wakeref is currently held
+ * @wf: the wakeref
+ *
+ * Returns: true if the wakeref is currently held.
+ */
+static inline bool
+intel_wakeref_active(struct intel_wakeref *wf)
+{
+ return READ_ONCE(wf->wakeref);
+}
+
+struct intel_wakeref_auto {
+ struct intel_runtime_pm *rpm;
+ struct timer_list timer;
+ intel_wakeref_t wakeref;
+ spinlock_t lock;
+ refcount_t count;
+};
+
+/**
+ * intel_wakeref_auto: Delay the runtime-pm autosuspend
+ * @wf: the wakeref
+ * @timeout: relative timeout in jiffies
+ *
+ * The runtime-pm core uses a suspend delay after the last wakeref
+ * is released before triggering runtime suspend of the device. That
+ * delay is configurable via sysfs with little regard to the device
+ * characteristics. Instead, we want to tune the autosuspend based on our
+ * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
+ * timeout.
+ *
+ * Pass @timeout = 0 to cancel a previous autosuspend by executing the
+ * suspend immediately.
+ */
+void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
+
+void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
+ struct intel_runtime_pm *rpm);
+void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
+
+#endif /* INTEL_WAKEREF_H */
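A usage sketch for the auto variant, assuming a caller that wants the
device to linger awake briefly after each access instead of relying on the
sysfs autosuspend delay (the 250ms figure is illustrative only):

	struct intel_wakeref_auto wf;

	intel_wakeref_auto_init(&wf, &i915->runtime_pm);

	/* per access, while a wakeref is already held: */
	intel_wakeref_auto(&wf, msecs_to_jiffies(250));

	/* teardown flushes any pending timeout and suspends immediately */
	intel_wakeref_auto_fini(&wf);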
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index f82a415ea2ba..7b4ba84b9fb8 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -41,26 +41,27 @@
* context).
*/
-/* Default WOPCM size 1MB. */
-#define GEN9_WOPCM_SIZE (1024 * 1024)
+/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
+#define GEN11_WOPCM_SIZE SZ_2M
+#define GEN9_WOPCM_SIZE SZ_1M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
-#define WOPCM_RESERVED_SIZE (16 * 1024)
+#define WOPCM_RESERVED_SIZE SZ_16K
/* 16KB reserved at the beginning of GuC WOPCM. */
-#define GUC_WOPCM_RESERVED (16 * 1024)
+#define GUC_WOPCM_RESERVED SZ_16K
/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
-#define GUC_WOPCM_STACK_RESERVED (8 * 1024)
+#define GUC_WOPCM_STACK_RESERVED SZ_8K
/* GuC WOPCM Offset value needs to be aligned to 16KB. */
#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
-#define BXT_WOPCM_RC6_CTX_RESERVED (24 * 1024)
+#define BXT_WOPCM_RC6_CTX_RESERVED (SZ_16K + SZ_8K)
/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
-#define CNL_WOPCM_HW_CTX_RESERVED (36 * 1024)
+#define CNL_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
-#define GEN9_GUC_FW_RESERVED (128 * 1024)
+#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
/**
@@ -71,7 +72,15 @@
*/
void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
- wopcm->size = GEN9_WOPCM_SIZE;
+ struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+
+ if (!HAS_GUC(i915))
+ return;
+
+ if (INTEL_GEN(i915) >= 11)
+ wopcm->size = GEN11_WOPCM_SIZE;
+ else
+ wopcm->size = GEN9_WOPCM_SIZE;
DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
}
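The SZ_* conversions above are value-preserving: SZ_16K + SZ_8K is the same
24KB as before and SZ_32K + SZ_4K the same 36KB. A compile-time check,
shown only to confirm the arithmetic and assuming a kernel that provides
static_assert() in <linux/build_bug.h> (not part of the patch):

	#include <linux/build_bug.h>
	#include <linux/sizes.h>

	static_assert(SZ_16K + SZ_8K == 24 * 1024);  /* BXT RC6 ctx reserve */
	static_assert(SZ_32K + SZ_4K == 36 * 1024);  /* CNL HW ctx reserve */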
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 6298910a384c..114401971520 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -24,6 +24,21 @@ struct intel_wopcm {
} guc;
};
+/**
+ * intel_wopcm_guc_size()
+ * @wopcm: intel_wopcm structure
+ *
+ * Returns size of the WOPCM shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM size.
+ */
+static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
+{
+ return wopcm->guc.size;
+}
+
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
int intel_wopcm_init(struct intel_wopcm *wopcm);
int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
deleted file mode 100644
index a6133a9e8029..000000000000
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HUGE_GEM_OBJECT_H
-#define __HUGE_GEM_OBJECT_H
-
-struct drm_i915_gem_object *
-huge_gem_object(struct drm_i915_private *i915,
- phys_addr_t phys_size,
- dma_addr_t dma_size);
-
-static inline phys_addr_t
-huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
-{
- return obj->scratch;
-}
-
-static inline dma_addr_t
-huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
-{
- return obj->base.size;
-}
-
-#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 27d8f853111b..c0b3537a5fa6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,9 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
@@ -46,7 +48,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -95,7 +97,7 @@ static int live_active_wait(void *arg)
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -109,7 +111,7 @@ static int live_active_wait(void *arg)
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -124,7 +126,7 @@ static int live_active_retire(void *arg)
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -138,7 +140,7 @@ static int live_active_retire(void *arg)
}
i915_active_fini(&active.base);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 6fd70d326468..c6a01a6e87f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,36 +6,31 @@
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_selftest.h"
-#include "mock_context.h"
#include "igt_flush_test.h"
+#include "mock_drm.h"
static int switch_to_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err = 0;
-
- wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
i915_request_add(rq);
}
- intel_runtime_pm_put(i915, wakeref);
-
- return err;
+ return 0;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -68,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -79,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -93,7 +88,7 @@ static void pm_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
}
@@ -103,7 +98,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
@@ -119,8 +114,8 @@ static void pm_resume(struct drm_i915_private *i915)
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- with_intel_runtime_pm(i915, wakeref) {
- intel_engines_sanitize(i915, false);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ intel_gt_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 89766688e420..a3cb0aade6f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -22,10 +22,14 @@
*
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -64,20 +68,24 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
}
+ bound = 0;
unbound = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (obj->mm.quirked)
+ list_for_each_entry(obj, objects, st_link) {
+ GEM_BUG_ON(!obj->mm.quirked);
+
+ if (atomic_read(&obj->bind_count))
+ bound++;
+ else
unbound++;
+ }
+ GEM_BUG_ON(bound + unbound != count);
+
if (unbound) {
pr_err("%s: Found %lu objects unbound, expected %u!\n",
__func__, unbound, 0);
return -EINVAL;
}
- bound = 0;
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (obj->mm.quirked)
- bound++;
if (bound != count) {
pr_err("%s: Found %lu objects bound, expected %lu!\n",
__func__, bound, count);
@@ -397,7 +405,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -460,7 +468,7 @@ static int igt_evict_contexts(void *arg)
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
igt_evict_ctl.fail_if_busy = false;
if (IS_ERR(rq)) {
@@ -498,6 +506,8 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->drm.struct_mutex);
out_locked:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -508,7 +518,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -532,7 +542,7 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9cca66e4420a..1a60b9fe8221 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,10 +25,11 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -147,7 +148,7 @@ err:
static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
u64 size, last, limit;
int err = 0;
@@ -156,7 +157,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __hw_ppgtt_create(dev_priv);
+ ppgtt = __ppgtt_create(dev_priv);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -208,7 +209,7 @@ static int igt_ppgtt_alloc(void *arg)
err_ppgtt_cleanup:
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -294,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
count = n;
@@ -998,7 +999,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
unsigned long end_time))
{
struct drm_file *file;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1020,7 +1021,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1170,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1217,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1232,7 +1233,7 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- obj->bind_count++; /* track for eviction later */
+ atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
@@ -1250,7 +1251,6 @@ static int exercise_mock(struct drm_i915_private *i915,
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1258,10 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- ppgtt = ctx->ppgtt;
- GEM_BUG_ON(!ppgtt);
-
- err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+ err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
mock_context_close(ctx);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 6d766925ad04..d5dc4427d664 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,13 +16,18 @@ selftest(timelines, i915_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
+selftest(mman, i915_gem_mman_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
+selftest(vma, i915_vma_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
+selftest(blt, i915_gem_object_blt_live_selftests)
+selftest(client, i915_gem_client_blt_live_selftests)
+selftest(reset, intel_reset_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 88e5ab586337..510eb176bb2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
+selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e6ffe2240126..298bb7116c51 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,12 +24,14 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_live_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -72,12 +74,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
@@ -89,7 +91,7 @@ static int igt_wait_request(void *arg)
i915_request_add(request);
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
@@ -99,12 +101,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
@@ -114,7 +116,7 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
@@ -267,7 +269,7 @@ static struct i915_request *
__live_request_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- return i915_request_alloc(engine, ctx);
+ return igt_request_alloc(ctx, engine);
}
static int __igt_breadcrumbs_smoketest(void *arg)
@@ -512,7 +514,7 @@ int i915_request_mock_selftests(void)
if (!i915)
return -ENOMEM;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
drm_dev_put(&i915->drm);
@@ -535,7 +537,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -551,8 +553,7 @@ static int live_nop_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- request = i915_request_alloc(engine,
- i915->kernel_context);
+ request = i915_request_create(engine->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
@@ -573,9 +574,7 @@ static int live_nop_request(void *arg)
i915_request_add(request);
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -596,7 +595,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -649,7 +648,7 @@ empty_request(struct intel_engine_cs *engine,
struct i915_request *request;
int err;
- request = i915_request_alloc(engine, engine->i915->kernel_context);
+ request = i915_request_create(engine->kernel_context);
if (IS_ERR(request))
return request;
@@ -681,7 +680,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -705,9 +704,7 @@ static int live_empty_request(void *arg)
err = PTR_ERR(request);
goto out_batch;
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
for_each_prime_number_from(prime, 1, 8192) {
times[1] = ktime_get_raw();
@@ -719,9 +716,7 @@ static int live_empty_request(void *arg)
goto out_batch;
}
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -745,7 +740,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -753,8 +748,7 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -839,7 +833,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -853,7 +847,7 @@ static int live_all_engines(void *arg)
}
for_each_engine(engine, i915, id) {
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -868,12 +862,9 @@ static int live_all_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- if (!i915_gem_object_has_active_reference(batch->obj)) {
- i915_gem_object_get(batch->obj);
- i915_gem_object_set_active_reference(batch->obj);
- }
-
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
i915_request_get(request[id]);
@@ -898,8 +889,7 @@ static int live_all_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -922,7 +912,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -945,7 +935,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -962,7 +952,7 @@ static int live_sequential_engines(void *arg)
goto out_unlock;
}
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -988,12 +978,11 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_gem_object_set_active_reference(batch->obj);
- i915_vma_get(batch);
-
i915_request_get(request[id]);
i915_request_add(request[id]);
@@ -1017,8 +1006,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1052,7 +1040,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1075,7 +1063,7 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (HAS_EXECLISTS(ctx->i915))
return INT_MAX;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
} else {
@@ -1117,7 +1105,7 @@ static int live_breadcrumbs_smoketest(void *arg)
* On real hardware this time.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -1224,7 +1212,7 @@ out_threads:
out_file:
mock_file_free(i915, file);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index bd96afcadfe7..76d3977f1d4b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "mock_gem_device.h"
@@ -454,7 +456,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
goto out;
}
- rq = i915_request_alloc(engine, engine->i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
goto out_unpin;
@@ -513,7 +515,7 @@ static int live_hwsp_engine(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -556,7 +558,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -589,7 +591,7 @@ static int live_hwsp_alternate(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
@@ -632,7 +634,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -656,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl)) {
@@ -678,7 +680,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@@ -722,7 +724,7 @@ static int live_hwsp_wrap(void *arg)
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
err = -EIO;
goto out;
@@ -747,7 +749,7 @@ out:
out_free:
i915_timeline_put(tl);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -769,7 +771,7 @@ static int live_hwsp_recycle(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -795,9 +797,7 @@ static int live_hwsp_recycle(void *arg)
goto out;
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
i915_timeline_put(tl);
err = -EIO;
@@ -823,7 +823,7 @@ static int live_hwsp_recycle(void *arg)
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fc594b030f5a..fbc79b14823a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,10 +24,12 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_scatterlist.h"
+#include "i915_selftest.h"
#include "mock_gem_device.h"
-#include "mock_context.h"
#include "mock_gtt.h"
static bool assert_vma(struct i915_vma *vma,
@@ -36,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->vm) {
+ if (vma->vm != ctx->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -59,7 +61,7 @@ static bool assert_vma(struct i915_vma *vma,
static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- struct i915_ggtt_view *view)
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
bool ok = true;
@@ -111,7 +113,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = &ctx->ppgtt->vm;
+ struct i915_address_space *vm = ctx->vm;
struct i915_vma *vma;
int err;
@@ -397,18 +399,79 @@ assert_rotated(struct drm_i915_gem_object *obj,
return sg;
}
-static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
- const struct intel_rotation_plane_info *b)
+static unsigned long remapped_index(const struct intel_remapped_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].stride * y +
+ r->plane[n].offset + x);
+}
+
+static struct scatterlist *
+assert_remapped(struct drm_i915_gem_object *obj,
+ const struct intel_remapped_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+ unsigned int left = 0;
+ unsigned int offset;
+
+ for (y = 0; y < r->plane[n].height; y++) {
+ for (x = 0; x < r->plane[n].width; x++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+ if (!left) {
+ offset = 0;
+ left = sg_dma_len(sg);
+ }
+
+ src_idx = remapped_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (left < PAGE_SIZE || left & (PAGE_SIZE-1)) {
+ pr_err("Invalid sg.length, found %d, expected %lu for remapped page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) + offset != src) {
+ pr_err("Invalid address for remapped page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ left -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+
+ if (!left)
+ sg = sg_next(sg);
+ }
+ }
+
+ return sg;
+}
+
+static unsigned int rotated_size(const struct intel_remapped_plane_info *a,
+ const struct intel_remapped_plane_info *b)
{
return a->width * a->height + b->width * b->height;
}
-static int igt_vma_rotate(void *arg)
+static int igt_vma_rotate_remap(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_gem_object *obj;
- const struct intel_rotation_plane_info planes[] = {
+ const struct intel_remapped_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
{ .width = 2, .height = 2, .stride = 2 },
{ .width = 4, .height = 4, .stride = 4 },
@@ -426,6 +489,11 @@ static int igt_vma_rotate(void *arg)
{ .width = 6, .height = 4, .stride = 6 },
{ }
}, *a, *b;
+ enum i915_ggtt_view_type types[] = {
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
const unsigned int max_pages = 64;
int err = -ENOMEM;
@@ -437,6 +505,7 @@ static int igt_vma_rotate(void *arg)
if (IS_ERR(obj))
goto out;
+ for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
struct i915_ggtt_view view;
@@ -447,7 +516,7 @@ static int igt_vma_rotate(void *arg)
GEM_BUG_ON(max_offset > max_pages);
max_offset = max_pages - max_offset;
- view.type = I915_GGTT_VIEW_ROTATED;
+ view.type = *t;
view.rotated.plane[0] = *a;
view.rotated.plane[1] = *b;
@@ -468,14 +537,23 @@ static int igt_vma_rotate(void *arg)
goto out_object;
}
- if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ if (view.type == I915_GGTT_VIEW_ROTATED &&
+ vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * rotated_size(a, b), vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (view.type == I915_GGTT_VIEW_REMAPPED &&
+ vma->size > rotated_size(a, b) * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * rotated_size(a, b), vma->size);
err = -EINVAL;
goto out_object;
}
- if (vma->pages->nents != rotated_size(a, b)) {
+ if (vma->pages->nents > rotated_size(a, b)) {
pr_err("sg table is wrong sizeo, expected %u, found %u nents\n",
rotated_size(a, b), vma->pages->nents);
err = -EINVAL;
@@ -497,9 +575,14 @@ static int igt_vma_rotate(void *arg)
sg = vma->pages->sgl;
for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
- sg = assert_rotated(obj, &view.rotated, n, sg);
+ if (view.type == I915_GGTT_VIEW_ROTATED)
+ sg = assert_rotated(obj, &view.rotated, n, sg);
+ else
+ sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
- pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
+ pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n",
+ view.type == I915_GGTT_VIEW_ROTATED ?
+ "rotated" : "remapped", n,
view.rotated.plane[0].width,
view.rotated.plane[0].height,
view.rotated.plane[0].stride,
@@ -518,6 +601,7 @@ static int igt_vma_rotate(void *arg)
}
}
}
+ }
out_object:
i915_gem_object_put(obj);
@@ -721,7 +805,7 @@ int i915_vma_mock_selftests(void)
static const struct i915_subtest tests[] = {
SUBTEST(igt_vma_create),
SUBTEST(igt_vma_pin1),
- SUBTEST(igt_vma_rotate),
+ SUBTEST(igt_vma_rotate_remap),
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
@@ -752,3 +836,147 @@ out_put:
drm_dev_put(&i915->drm);
return err;
}
+
+static int igt_vma_remapped_gtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct intel_remapped_plane_info planes[] = {
+ { .width = 1, .height = 1, .stride = 1 },
+ { .width = 2, .height = 2, .stride = 2 },
+ { .width = 4, .height = 4, .stride = 4 },
+ { .width = 8, .height = 8, .stride = 8 },
+
+ { .width = 3, .height = 5, .stride = 3 },
+ { .width = 3, .height = 5, .stride = 4 },
+ { .width = 3, .height = 5, .stride = 5 },
+
+ { .width = 5, .height = 3, .stride = 5 },
+ { .width = 5, .height = 3, .stride = 7 },
+ { .width = 5, .height = 3, .stride = 9 },
+
+ { .width = 4, .height = 6, .stride = 6 },
+ { .width = 6, .height = 4, .stride = 6 },
+ { }
+ }, *p;
+ enum i915_ggtt_view_type types[] = {
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ for (t = types; *t; t++) {
+ for (p = planes; p->width; p++) {
+ struct i915_ggtt_view view = {
+ .type = *t,
+ .rotated.plane[0] = *p,
+ };
+ struct i915_vma *vma;
+ u32 __iomem *map;
+ unsigned int x, y;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->ggtt_view.type != *t);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0; y < p->height; y++) {
+ for (x = 0; x < p->width; x++) {
+ unsigned int offset;
+ u32 val = y << 16 | x;
+
+ if (*t == I915_GGTT_VIEW_ROTATED)
+ offset = (x * p->height + y) * PAGE_SIZE;
+ else
+ offset = (y * p->width + x) * PAGE_SIZE;
+
+ iowrite32(val, &map[offset / sizeof(*map)]);
+ }
+ }
+
+ i915_vma_unpin_iomap(vma);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0; y < p->height; y++) {
+ for (x = 0; x < p->width; x++) {
+ unsigned int offset, src_idx;
+ u32 exp = y << 16 | x;
+ u32 val;
+
+ if (*t == I915_GGTT_VIEW_ROTATED)
+ src_idx = rotated_index(&view.rotated, 0, x, y);
+ else
+ src_idx = remapped_index(&view.remapped, 0, x, y);
+ offset = src_idx * PAGE_SIZE;
+
+ val = ioread32(&map[offset / sizeof(*map)]);
+ if (val != exp) {
+ pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
+ *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ exp, val);
+ i915_vma_unpin_iomap(vma);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ i915_vma_unpin_iomap(vma);
+ }
+ }
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_vma_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_remapped_gtt),
+ };
+
+ return i915_subtests(tests, i915);
+}
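The two view types exercised above differ only in how page indices are
generated: remapped_index() walks row-major through the plane's stride
(stride * y + offset + x), while the rotated layout walks column-major
(x * height + y). For an assumed plane of width 3, height 2, stride 4 and
offset 0, the remapped indices per row are {0, 1, 2} and {4, 5, 6}, whereas
the rotated offsets enumerate {0, 2, 4} and {1, 3, 5}. A sketch of just the
arithmetic, mirroring the selftest helpers:

	static unsigned long remapped_idx(unsigned int stride, unsigned int offset,
					  unsigned int x, unsigned int y)
	{
		return (unsigned long)stride * y + offset + x;
	}

	static unsigned long rotated_idx(unsigned int height,
					 unsigned int x, unsigned int y)
	{
		return (unsigned long)x * height + y;
	}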
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
new file mode 100644
index 000000000000..93ec89f487ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_ATOMIC_H
+#define IGT_ATOMIC_H
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+struct igt_atomic_section {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+};
+
+static const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
+
+#endif /* IGT_ATOMIC_H */
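A typical consumer of this table runs the operation under test inside each
critical-section flavour in turn; the op() callback here is invented for
illustration and must not sleep in any of these contexts:

	static int check_atomic_safe(int (*op)(void))
	{
		const struct igt_atomic_section *p;
		int err = 0;

		for (p = igt_atomic_phases; p->name; p++) {
			p->critical_section_begin();
			err = op(); /* atomic context: no sleeping */
			p->critical_section_end();
			if (err)
				break;
		}

		return err;
	}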
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 94aee4071a66..5bfd1b2626a2 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,30 +4,38 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "gem/i915_gem_context.h"
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
-#include "../i915_selftest.h"
#include "igt_flush_test.h"
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
+ int ret = i915_terminally_wedged(i915) ? -EIO : 0;
+ int repeat = !!(flags & I915_WAIT_LOCKED);
+
cond_resched();
- if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
- pr_err("Failed to switch back to kernel context; declaring wedged\n");
- i915_gem_set_wedged(i915);
- }
+ do {
+ if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ repeat = 0;
+ ret = -EIO;
+ }
- i915_gem_set_wedged(i915);
- }
+ /* Ensure we also flush after wedging. */
+ if (flags & I915_WAIT_LOCKED)
+ i915_retire_requests(i915);
+ } while (repeat--);
- return i915_terminally_wedged(i915);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 208a966da8ca..587df6fd4ffe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -6,8 +6,9 @@
#include "igt_reset.h"
+#include "gt/intel_engine.h"
+
#include "../i915_drv.h"
-#include "../intel_ringbuffer.h"
void igt_global_reset_lock(struct drm_i915_private *i915)
{
@@ -42,3 +43,11 @@ void igt_global_reset_unlock(struct drm_i915_private *i915)
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
}
+
+bool igt_force_reset(struct drm_i915_private *i915)
+{
+ i915_gem_set_wedged(i915);
+ i915_reset(i915, 0, NULL);
+
+ return !i915_reset_failed(i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 5f0234d045d5..363bd853e50f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -11,5 +11,6 @@
void igt_global_reset_lock(struct drm_i915_private *i915);
void igt_global_reset_unlock(struct drm_i915_private *i915);
+bool igt_force_reset(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 16890dfe74c0..1e59b543cf27 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -4,6 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include "gem/selftests/igt_gem_utils.h"
+
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
@@ -74,16 +76,11 @@ static int move_to_active(struct i915_vma *vma,
{
int err;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags);
- if (err)
- return err;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
+ i915_vma_unlock(vma);
- return 0;
+ return err;
}
struct i915_request *
@@ -92,17 +89,16 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine,
u32 arbitration_command)
{
- struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, vm, NULL);
+ vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, vm, NULL);
+ hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
@@ -114,7 +110,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 391777c76dc7..34a88ac9b47a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -7,12 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
#define __I915_SELFTESTS_IGT_SPINNER_H__
-#include "../i915_selftest.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_engine.h"
-#include "../i915_drv.h"
-#include "../i915_request.h"
-#include "../intel_ringbuffer.h"
-#include "../i915_gem_context.h"
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_selftest.h"
struct igt_spinner {
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index b05a21eaa8f4..6ca8584cd64c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -22,7 +22,8 @@
*
*/
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "gem/i915_gem_pm.h"
/* max doorbell number + negative test for each client type */
#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
@@ -143,7 +144,7 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -226,7 +227,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -246,7 +247,7 @@ static int igt_guc_doorbells(void *arg)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -339,7 +340,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
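
Both GuC tests pick up the runtime-PM conversion seen throughout this series: wakerefs are taken against the embedded intel_runtime_pm struct rather than against drm_i915_private as a whole. The bracketing pattern, as used in the hunks above:

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	/* ... hardware access requiring the device awake ... */
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);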
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e0d7ebecb215..86815c6072a1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg)
return 0;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 2bfa72c1654b..b976c12817c5 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -45,6 +45,9 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
void onstack_fence_fini(struct i915_sw_fence *fence)
{
+ if (!fence->flags)
+ return;
+
i915_sw_fence_commit(fence);
i915_sw_fence_fini(fence);
}
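
With the added guard, onstack_fence_fini() becomes a no-op for a fence that never went through __onstack_fence_init() (presumably its flags word is still zero in that case), so unwind paths may call it unconditionally. A hypothetical caller:

	struct i915_sw_fence fence = {}; /* never initialised: flags == 0 */

	onstack_fence_fini(&fence); /* returns early, nothing to commit */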
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
deleted file mode 100644
index 29b9d60a158b..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_CONTEXT_H
-#define __MOCK_CONTEXT_H
-
-void mock_init_contexts(struct drm_i915_private *i915);
-
-struct i915_gem_context *
-mock_context(struct drm_i915_private *i915,
- const char *name);
-
-void mock_context_close(struct i915_gem_context *ctx);
-
-struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
-
-struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
-void kernel_context_close(struct i915_gem_context *ctx);
-
-#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
deleted file mode 100644
index ec80613159b9..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
+++ /dev/null
@@ -1,41 +0,0 @@
-
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_DMABUF_H__
-#define __MOCK_DMABUF_H__
-
-#include <linux/dma-buf.h>
-
-struct mock_dmabuf {
- int npages;
- struct page *pages[];
-};
-
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
-{
- return buf->priv;
-}
-
-#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 60bbf8b4df40..64bc51400ae7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,14 +25,16 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include "mock_engine.h"
-#include "mock_context.h"
+#include "gt/mock_engine.h"
+
#include "mock_request.h"
#include "mock_gem_device.h"
-#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/mock_gem_object.h"
+
void mock_device_flush(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -40,11 +42,10 @@ void mock_device_flush(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
- mock_engine_flush(engine);
-
- i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
+ do {
+ for_each_engine(engine, i915, id)
+ mock_engine_flush(engine);
+ } while (i915_retire_requests(i915));
}
static void mock_device_release(struct drm_device *dev)
@@ -55,11 +56,9 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
- drain_delayed_work(&i915->gt.retire_work);
- drain_delayed_work(&i915->gt.idle_work);
+ flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -109,10 +108,6 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gt.idle_work.work);
-
- i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -156,8 +151,6 @@ struct drm_i915_private *mock_gem_device(void)
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
- intel_runtime_pm_init_early(i915);
-
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -172,6 +165,8 @@ struct drm_i915_private *mock_gem_device(void)
i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
+ intel_runtime_pm_init_early(&i915->runtime_pm);
+
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
@@ -184,6 +179,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
+ intel_gt_pm_init(i915);
+ atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
@@ -196,8 +193,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
- INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+ INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
+ INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
i915->gt.awake = true;
@@ -205,18 +202,23 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
+ spin_lock_init(&i915->gt.closed_lock);
mutex_lock(&i915->drm.struct_mutex);
mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->engine_mask = BIT(0);
- i915->kernel_context = mock_context(i915, NULL);
- if (!i915->kernel_context)
- goto err_unlock;
i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
if (!i915->engine[RCS0])
+ goto err_unlock;
+
+ i915->kernel_context = mock_context(i915, NULL);
+ if (!i915->kernel_context)
+ goto err_engine;
+
+ if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -227,6 +229,8 @@ struct drm_i915_private *mock_gem_device(void)
err_context:
i915_gem_contexts_fini(i915);
+err_engine:
+ mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
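
The construction order in mock_gem_device() is reworked so the mock engine exists before the kernel context that runs on it, and the error labels now unwind in exact reverse order of construction. The shape, condensed from the hunks above (a sketch, not the full function):

	i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
	if (!i915->engine[RCS0])
		goto err_unlock;

	i915->kernel_context = mock_context(i915, NULL);
	if (!i915->kernel_context)
		goto err_engine;

	if (mock_engine_init(i915->engine[RCS0]))
		goto err_context;
	/* ... */
err_context:
	i915_gem_contexts_fini(i915);
err_engine:
	mock_engine_free(i915->engine[RCS0]);
err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);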
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index cd83929fde8e..f625c307a406 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,17 +55,14 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name)
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return NULL;
- kref_init(&ppgtt->ref);
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 40d544bde1d5..3387393286de 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -28,8 +28,6 @@
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name);
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index d1a7c9608712..9390fc09984b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -22,7 +22,9 @@
*
*/
-#include "mock_engine.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gt/mock_engine.h"
+
#include "mock_request.h"
struct i915_request *
@@ -33,7 +35,7 @@ mock_request(struct intel_engine_cs *engine,
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = i915_request_alloc(engine, context);
+ request = igt_request_alloc(context, engine);
if (IS_ERR(request))
return NULL;
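
mock_request() receives the same allocation change as igt_spinner above: requests come from the selftest helper in gem/selftests/igt_gem_utils.h, whose argument order is (context, engine), the reverse of the old i915_request_alloc(engine, context):

	request = igt_request_alloc(context, engine);
	if (IS_ERR(request))
		return NULL;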
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index e084476469ef..65b52be23d42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->i915 = NULL;
timeline->fence_context = context;
- spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->last_request);
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index cd6d2a16071f..d599186d5b71 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -24,7 +24,8 @@
#include <linux/prime_numbers.h>
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "i915_utils.h"
#define PFN_BIAS (1 << 10)