-rw-r--r-- Documentation/DocBook/device-drivers.tmpl | 4
-rw-r--r-- Documentation/DocBook/gpu.tmpl | 27
-rw-r--r-- MAINTAINERS | 3
-rw-r--r-- arch/x86/kernel/early-quirks.c | 404
-rw-r--r-- drivers/dma-buf/Makefile | 2
-rw-r--r-- drivers/dma-buf/fence-array.c | 144
-rw-r--r-- drivers/dma-buf/fence.c | 8
-rw-r--r-- drivers/dma-buf/sync_file.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 12
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 4
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 19
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 14
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_fb.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 10
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 2
-rw-r--r-- drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 80
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 79
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 117
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 27
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 48
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 18
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 133
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 22
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 4
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 4
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 13
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 10
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 21
-rw-r--r-- drivers/gpu/drm/drm_vma_manager.c | 3
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 9
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.h | 4
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 44
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 167
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 132
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 129
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 405
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 417
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 464
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 239
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 48
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 81
-rw-r--r-- drivers/gpu/drm/i915/i915_guc_reg.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_guc_submission.c | 174
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 515
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 21
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 48
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 31
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 28
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1701
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 485
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 172
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_dpio_phy.c | 470
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.c | 40
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 195
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 54
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 77
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.h | 37
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fwif.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_loader.c | 179
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 354
-rw-r--r-- drivers/gpu/drm/i915/intel_hotplug.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 704
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 109
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 147
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1189
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 53
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 476
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 61
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 43
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 221
-rw-r--r-- drivers/gpu/drm/i915/intel_vbt_defs.h | 13
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 20
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 10
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 37
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_usif.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 9
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 11
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 8
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 8
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 13
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 20
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 10
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 2
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.c | 8
-rw-r--r-- drivers/gpu/drm/sti/sti_dvo.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 14
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 7
-rw-r--r-- drivers/gpu/drm/sti/sti_vid.c | 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 21
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 11
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 10
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 5
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c | 126
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.h | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c | 150
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r-- drivers/staging/android/sync.h | 3
-rw-r--r-- include/drm/drmP.h | 28
-rw-r--r-- include/drm/drm_atomic.h | 66
-rw-r--r-- include/drm/drm_atomic_helper.h | 30
-rw-r--r-- include/drm/drm_crtc.h | 163
-rw-r--r-- include/drm/drm_dp_helper.h | 1
-rw-r--r-- include/drm/drm_mipi_dsi.h | 1
-rw-r--r-- include/drm/drm_modes.h | 2
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 10
-rw-r--r-- include/drm/i915_drm.h | 3
-rw-r--r-- include/linux/fence-array.h | 73
-rw-r--r-- include/linux/fence.h | 13
-rw-r--r-- include/linux/io-mapping.h | 10
-rw-r--r-- include/linux/vga_switcheroo.h | 2
199 files changed, 7060 insertions, 5731 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 8c68768ebee5..c3313d45f4d6 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -161,6 +161,10 @@ X!Edrivers/base/interface.c
!Iinclude/linux/fence.h
!Edrivers/dma-buf/seqno-fence.c
!Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/fence-array.c
+!Iinclude/linux/fence-array.h
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
!Edrivers/dma-buf/sync_file.c
!Iinclude/linux/sync_file.h
</sect2>
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 7586bf75f62e..dac18b4ff090 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1092,22 +1092,6 @@ int max_width, max_height;</synopsis>
operation.
</para>
</sect2>
- <sect2>
- <title>Locking</title>
- <para>
- Beside some lookup structures with their own locking (which is hidden
- behind the interface functions) most of the modeset state is protected
- by the <code>dev-&lt;mode_config.lock</code> mutex and additionally
- per-crtc locks to allow cursor updates, pageflips and similar operations
- to occur concurrently with background tasks like output detection.
- Operations which cross domains like a full modeset always grab all
- locks. Drivers there need to protect resources shared between crtcs with
- additional locking. They also need to be careful to always grab the
- relevant crtc locks if a modset functions touches crtc state, e.g. for
- load detection (which does only grab the <code>mode_config.lock</code>
- to allow concurrent screen updates on live crtcs).
- </para>
- </sect2>
</sect1>
<!-- Internals: kms initialization and cleanup -->
@@ -1586,7 +1570,7 @@ void intel_crt_init(struct drm_device *dev)
</sect3>
<sect3>
<title>Implementing Asynchronous Atomic Commit</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing nonblocking commit
</sect3>
<sect3>
<title>Atomic State Reset and Initialization</title>
@@ -2845,14 +2829,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
<para>
Drivers must initialize the vertical blanking handling core with a call to
<function>drm_vblank_init</function> in their
- <methodname>load</methodname> operation. The function will set the struct
- <structname>drm_device</structname>
- <structfield>vblank_disable_allowed</structfield> field to 0. This will
- keep vertical blanking interrupts enabled permanently until the first mode
- set operation, where <structfield>vblank_disable_allowed</structfield> is
- set to 1. The reason behind this is not clear. Drivers can set the field
- to 1 after <function>calling drm_vblank_init</function> to make vertical
- blanking interrupts dynamically managed from the beginning.
+ <methodname>load</methodname> operation.
</para>
<para>
Vertical blanking interrupts can be enabled by the DRM core or by drivers
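A minimal sketch of the documented call, assuming a driver whose load operation already knows its CRTC count (the foo_ prefix is hypothetical):

static int foo_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	/* Set up vblank bookkeeping for every CRTC before enabling interrupts. */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		return ret;

	return 0;
}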
diff --git a/MAINTAINERS b/MAINTAINERS
index 069c7c9a396e..cb88f724e07c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3854,6 +3854,9 @@ T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
+F: Documentation/devicetree/bindings/display/
+F: Documentation/devicetree/bindings/gpu/
+F: Documentation/devicetree/bindings/video/
F: Documentation/DocBook/gpu.*
F: include/drm/
F: include/uapi/drm/
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c899137..757390eb562b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
* despite the efforts of the "RAM buffer" approach, which simply rounds
* memory boundaries up to 64M to try to catch space that may decode
* as RAM and so is not suitable for MMIO.
- *
- * And yes, so far on current devices the base addr is always under 4G.
*/
-static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
-{
- u32 base;
-
- /*
- * For the PCI IDs in this quirk, the stolen base is always
- * in 0x5c, aka the BDSM register (yes that's really what
- * it's called).
- */
- base = read_pci_config(num, slot, func, 0x5c);
- base &= ~((1<<20) - 1);
-
- return base;
-}
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x)))
-#define GB(x) (MB (KB (x)))
static size_t __init i830_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
- if (tmp & I830_TSEG_SIZE_1M)
+ if (esmramc & I830_TSEG_SIZE_1M)
return MB(1);
else
return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
static size_t __init i845_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+ u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- return KB(512);
- case I845_TSEG_SIZE_1M:
- return MB(1);
+ switch (tseg_size) {
+ case I845_TSEG_SIZE_512K: return KB(512);
+ case I845_TSEG_SIZE_1M: return MB(1);
default:
- WARN_ON(1);
- return 0;
+ WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
}
+ return 0;
}
static size_t __init i85x_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
return MB(1);
@@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
* On 830/845/85x the stolen memory base isn't available in any
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
*/
-static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i830_mem_size() - i830_tseg_size() - stolen_size;
+ return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
}
-static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i830_mem_size() - i845_tseg_size() - stolen_size;
+ return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
}
-static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+ return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
}
-static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
+ u16 toud;
+
/*
* FIXME is the graphics stolen memory region
* always at TOUD? Ie. is it always the last
* one to be allocated by the BIOS?
*/
- return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+ toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+ return (phys_addr_t)toud << 16;
+}
+
+static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
+{
+ u32 bsm;
+
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at register BSM (0x5c) in the igfx configuration space. On a few
+ * (desktop) machines this is also mirrored in the bridge device at
+ * different locations, or in the MCHBAR.
+ */
+ bsm = read_pci_config(num, slot, func, INTEL_BSM);
+
+ return (phys_addr_t)bsm & INTEL_BSM_MASK;
}
static size_t __init i830_stolen_size(int num, int slot, int func)
{
- size_t stolen_size;
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- stolen_size = KB(512);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- stolen_size = MB(1);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- stolen_size = MB(8);
- break;
- case I830_GMCH_GMS_LOCAL:
- /* local memory isn't part of the normal address space */
- stolen_size = 0;
- break;
+ gms = gmch_ctrl & I830_GMCH_GMS_MASK;
+
+ switch (gms) {
+ case I830_GMCH_GMS_STOLEN_512: return KB(512);
+ case I830_GMCH_GMS_STOLEN_1024: return MB(1);
+ case I830_GMCH_GMS_STOLEN_8192: return MB(8);
+ /* local memory isn't part of the normal address space */
+ case I830_GMCH_GMS_LOCAL: return 0;
default:
- return 0;
+ WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
}
- return stolen_size;
+ return 0;
}
static size_t __init gen3_stolen_size(int num, int slot, int func)
{
- size_t stolen_size;
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
- switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- stolen_size = MB(1);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- stolen_size = MB(4);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- stolen_size = MB(8);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- stolen_size = MB(16);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- stolen_size = MB(48);
- break;
- case I915_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case G33_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case G33_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
+ gms = gmch_ctrl & I855_GMCH_GMS_MASK;
+
+ switch (gms) {
+ case I855_GMCH_GMS_STOLEN_1M: return MB(1);
+ case I855_GMCH_GMS_STOLEN_4M: return MB(4);
+ case I855_GMCH_GMS_STOLEN_8M: return MB(8);
+ case I855_GMCH_GMS_STOLEN_16M: return MB(16);
+ case I855_GMCH_GMS_STOLEN_32M: return MB(32);
+ case I915_GMCH_GMS_STOLEN_48M: return MB(48);
+ case I915_GMCH_GMS_STOLEN_64M: return MB(64);
+ case G33_GMCH_GMS_STOLEN_128M: return MB(128);
+ case G33_GMCH_GMS_STOLEN_256M: return MB(256);
+ case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
+ case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
+ case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
+ case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
default:
- stolen_size = 0;
- break;
+ WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
}
- return stolen_size;
+ return 0;
}
static size_t __init gen6_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
- return gmch_ctrl << 25; /* 32 MB units */
+ return (size_t)gms * MB(32);
}
static size_t __init gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
- gmch_ctrl &= BDW_GMCH_GMS_MASK;
- return gmch_ctrl << 25; /* 32 MB units */
+ gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
+
+ return (size_t)gms * MB(32);
}
static size_t __init chv_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
/*
* 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
- if (gmch_ctrl < 0x11)
- return gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (gmch_ctrl - 0x11 + 2) << 22;
+ if (gms < 0x11)
+ return (size_t)gms * MB(32);
+ else if (gms < 0x17)
+ return (size_t)(gms - 0x11 + 2) * MB(4);
else
- return (gmch_ctrl - 0x17 + 9) << 22;
+ return (size_t)(gms - 0x17 + 9) * MB(4);
}
-struct intel_stolen_funcs {
- size_t (*size)(int num, int slot, int func);
- u32 (*base)(int num, int slot, int func, size_t size);
-};
-
static size_t __init gen9_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
- gmch_ctrl &= BDW_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
- if (gmch_ctrl < 0xf0)
- return gmch_ctrl << 25; /* 32 MB units */
+ /* 0x0 to 0xef: 32MB increments starting at 0MB */
+ /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+ if (gms < 0xf0)
+ return (size_t)gms * MB(32);
else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (gmch_ctrl - 0xf0 + 1) << 22;
+ return (size_t)(gms - 0xf0 + 1) * MB(4);
}
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+struct intel_early_ops {
+ size_t (*stolen_size)(int num, int slot, int func);
+ phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+};
-static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
- .base = i830_stolen_base,
- .size = i830_stolen_size,
+static const struct intel_early_ops i830_early_ops __initconst = {
+ .stolen_base = i830_stolen_base,
+ .stolen_size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
- .base = i845_stolen_base,
- .size = i830_stolen_size,
+static const struct intel_early_ops i845_early_ops __initconst = {
+ .stolen_base = i845_stolen_base,
+ .stolen_size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
- .base = i85x_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops i85x_early_ops __initconst = {
+ .stolen_base = i85x_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
- .base = i865_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops i865_early_ops __initconst = {
+ .stolen_base = i865_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops gen3_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen6_stolen_size,
+static const struct intel_early_ops gen6_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen6_stolen_size,
};
-static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen8_stolen_size,
+static const struct intel_early_ops gen8_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen8_stolen_size,
};
-static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen9_stolen_size,
+static const struct intel_early_ops gen9_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen9_stolen_size,
};
-static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = chv_stolen_size,
+static const struct intel_early_ops chv_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = chv_stolen_size,
};
-static const struct pci_device_id intel_stolen_ids[] __initconst = {
- INTEL_I830_IDS(&i830_stolen_funcs),
- INTEL_I845G_IDS(&i845_stolen_funcs),
- INTEL_I85X_IDS(&i85x_stolen_funcs),
- INTEL_I865G_IDS(&i865_stolen_funcs),
- INTEL_I915G_IDS(&gen3_stolen_funcs),
- INTEL_I915GM_IDS(&gen3_stolen_funcs),
- INTEL_I945G_IDS(&gen3_stolen_funcs),
- INTEL_I945GM_IDS(&gen3_stolen_funcs),
- INTEL_VLV_M_IDS(&gen6_stolen_funcs),
- INTEL_VLV_D_IDS(&gen6_stolen_funcs),
- INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
- INTEL_I965G_IDS(&gen3_stolen_funcs),
- INTEL_G33_IDS(&gen3_stolen_funcs),
- INTEL_I965GM_IDS(&gen3_stolen_funcs),
- INTEL_GM45_IDS(&gen3_stolen_funcs),
- INTEL_G45_IDS(&gen3_stolen_funcs),
- INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
- INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
- INTEL_SNB_D_IDS(&gen6_stolen_funcs),
- INTEL_SNB_M_IDS(&gen6_stolen_funcs),
- INTEL_IVB_M_IDS(&gen6_stolen_funcs),
- INTEL_IVB_D_IDS(&gen6_stolen_funcs),
- INTEL_HSW_D_IDS(&gen6_stolen_funcs),
- INTEL_HSW_M_IDS(&gen6_stolen_funcs),
- INTEL_BDW_M_IDS(&gen8_stolen_funcs),
- INTEL_BDW_D_IDS(&gen8_stolen_funcs),
- INTEL_CHV_IDS(&chv_stolen_funcs),
- INTEL_SKL_IDS(&gen9_stolen_funcs),
- INTEL_BXT_IDS(&gen9_stolen_funcs),
- INTEL_KBL_IDS(&gen9_stolen_funcs),
+static const struct pci_device_id intel_early_ids[] __initconst = {
+ INTEL_I830_IDS(&i830_early_ops),
+ INTEL_I845G_IDS(&i845_early_ops),
+ INTEL_I85X_IDS(&i85x_early_ops),
+ INTEL_I865G_IDS(&i865_early_ops),
+ INTEL_I915G_IDS(&gen3_early_ops),
+ INTEL_I915GM_IDS(&gen3_early_ops),
+ INTEL_I945G_IDS(&gen3_early_ops),
+ INTEL_I945GM_IDS(&gen3_early_ops),
+ INTEL_VLV_M_IDS(&gen6_early_ops),
+ INTEL_VLV_D_IDS(&gen6_early_ops),
+ INTEL_PINEVIEW_IDS(&gen3_early_ops),
+ INTEL_I965G_IDS(&gen3_early_ops),
+ INTEL_G33_IDS(&gen3_early_ops),
+ INTEL_I965GM_IDS(&gen3_early_ops),
+ INTEL_GM45_IDS(&gen3_early_ops),
+ INTEL_G45_IDS(&gen3_early_ops),
+ INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
+ INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
+ INTEL_SNB_D_IDS(&gen6_early_ops),
+ INTEL_SNB_M_IDS(&gen6_early_ops),
+ INTEL_IVB_M_IDS(&gen6_early_ops),
+ INTEL_IVB_D_IDS(&gen6_early_ops),
+ INTEL_HSW_D_IDS(&gen6_early_ops),
+ INTEL_HSW_M_IDS(&gen6_early_ops),
+ INTEL_BDW_M_IDS(&gen8_early_ops),
+ INTEL_BDW_D_IDS(&gen8_early_ops),
+ INTEL_CHV_IDS(&chv_early_ops),
+ INTEL_SKL_IDS(&gen9_early_ops),
+ INTEL_BXT_IDS(&gen9_early_ops),
+ INTEL_KBL_IDS(&gen9_early_ops),
};
-static void __init intel_graphics_stolen(int num, int slot, int func)
+static void __init
+intel_graphics_stolen(int num, int slot, int func,
+ const struct intel_early_ops *early_ops)
{
+ phys_addr_t base, end;
size_t size;
+
+ size = early_ops->stolen_size(num, slot, func);
+ base = early_ops->stolen_base(num, slot, func, size);
+
+ if (!size || !base)
+ return;
+
+ end = base + size - 1;
+ printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
+ &base, &end);
+
+ /* Mark this space as reserved */
+ e820_add_region(base, size, E820_RESERVED);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
+static void __init intel_graphics_quirks(int num, int slot, int func)
+{
+ const struct intel_early_ops *early_ops;
+ u16 device;
int i;
- u32 start;
- u16 device, subvendor, subdevice;
device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
- subvendor = read_pci_config_16(num, slot, func,
- PCI_SUBSYSTEM_VENDOR_ID);
- subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
-
- for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
- if (intel_stolen_ids[i].device == device) {
- const struct intel_stolen_funcs *stolen_funcs =
- (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
- size = stolen_funcs->size(num, slot, func);
- start = stolen_funcs->base(num, slot, func, size);
- if (size && start) {
- printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
- start, start + (u32)size - 1);
- /* Mark this space as reserved */
- e820_add_region(start, size, E820_RESERVED);
- sanitize_e820_map(e820.map,
- ARRAY_SIZE(e820.map),
- &e820.nr_map);
- }
- return;
- }
+
+ for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
+ kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
+
+ if (intel_early_ids[i].device != device)
+ continue;
+
+ early_ops = (typeof(early_ops))driver_data;
+
+ intel_graphics_stolen(num, slot, func, early_ops);
+
+ return;
}
}
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
- QFLAG_APPLY_ONCE, intel_graphics_stolen },
+ QFLAG_APPLY_ONCE, intel_graphics_quirks },
/*
* HPET on the current version of the Baytrail platform has accuracy
* problems: it will halt in deep idle state - so we disable it.
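As a worked example of the decode rules above (all values taken from the comments in the hunks themselves): on CHV a GMS field of 0x14 lies in the 0x11-0x16 band, so the stolen size is (0x14 - 0x11 + 2) * 4MB = 20MB; on gen9 a field of 0xf4 gives (0xf4 - 0xf0 + 1) * 4MB = 20MB, while any gen9 value below 0xf0 scales linearly in 32MB units, e.g. 0x04 -> 128MB.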
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca75ed..f353db213a81 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000000..a8731c853da6
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+ return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+ struct fence_array_cb *array_cb =
+ container_of(cb, struct fence_array_cb, cb);
+ struct fence_array *array = array_cb->array;
+
+ if (atomic_dec_and_test(&array->num_pending))
+ fence_signal(&array->base);
+ fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ struct fence_array_cb *cb = (void *)(&array[1]);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i) {
+ cb[i].array = array;
+ /*
+ * As we may report that the fence is signaled before all
+ * callbacks are complete, we need to take an additional
+ * reference count on the array so that we do not free it too
+ * early. The core fence handling will only hold the reference
+ * until we signal the array as complete (but that is now
+ * insufficient).
+ */
+ fence_get(&array->base);
+ if (fence_add_callback(array->fences[i], &cb[i].cb,
+ fence_array_cb_func)) {
+ fence_put(&array->base);
+ if (atomic_dec_and_test(&array->num_pending))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+
+ return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ fence_put(array->fences[i]);
+
+ kfree(array->fences);
+ fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+ .get_driver_name = fence_array_get_driver_name,
+ .get_timeline_name = fence_array_get_timeline_name,
+ .enable_signaling = fence_array_enable_signaling,
+ .signaled = fence_array_signaled,
+ .wait = fence_default_wait,
+ .release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct fence_array *array;
+ size_t size = sizeof(*array);
+
+ /* Allocate the callback structures behind the array. */
+ size += num_fences * sizeof(struct fence_array_cb);
+ array = kzalloc(size, GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ spin_lock_init(&array->lock);
+ fence_init(&array->base, &fence_array_ops, &array->lock,
+ context, seqno);
+
+ array->num_fences = num_fences;
+ atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+ array->fences = fences;
+
+ return array;
+}
+EXPORT_SYMBOL(fence_array_create);
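A hedged usage sketch for the new API (the foo_ helper is hypothetical): the caller allocates the fences array, fills it with references it owns, and on success transfers ownership to the fence_array, which fence_put()s each entry on release:

static struct fence *foo_merge_fences(struct fence **fences, int n)
{
	struct fence_array *array;

	/* Signals only once all n fences signal (signal_on_any = false). */
	array = fence_array_create(n, fences, fence_context_alloc(1), 1,
				   false);
	if (!array)
		return NULL;	/* caller keeps ownership of fences[] */

	/* The array now owns fences[] and will free it on release. */
	return &array->base;
}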
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno)
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d8309e..9aaa608dfe01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
sync_file->num_fences = 1;
atomic_set(&sync_file->status, 1);
- snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..da3d02154fa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2032,7 +2032,7 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- unsigned fence_context;
+ u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
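A short sketch of how the widened context is typically consumed (the adev/ring fields follow the hunk above; foo_fence_ops and the locals are hypothetical): reserve one context per ring at init, then stamp each fence with its ring's context:

/* At init: reserve a contiguous block of contexts, one per ring. */
adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);

/* Per fence: the 64-bit context identifies the ring's timeline. */
fence_init(&fence->base, &foo_fence_ops, &ring->lock,
	   adev->fence_context + ring->idx, seqno);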
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..0b5f3accb1e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -240,7 +240,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->base = base;
- r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..b16366c2b4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %d",
+ seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..112e358f0f9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v10_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
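The same callback conversion repeats for dce_v11, dce_v8, ast and cirrus below; a minimal sketch of the new contract (foo_ names hypothetical): .gamma_set now always starts at index 0, receives the full table size, and reports failure through the int return:

static int foo_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, uint32_t size)
{
	int i;

	for (i = 0; i < size; i++)
		foo_write_lut_entry(crtc, i, red[i], green[i], blue[i]);

	return 0;	/* errors can now propagate to the core */
}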
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..b522fa2435a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2678,19 +2678,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v11_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3433,7 +3435,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..b50ed72feedb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2574,19 +2574,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v8_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3376,7 +3378,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5bde0..7675bbc70133 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -92,7 +92,7 @@ static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
if (e->base.file_priv != file)
continue;
list_del(&e->base.link);
- e->base.destroy(&e->base);
+ kfree(&e->base);
}
spin_unlock_irqrestore(&drm->event_lock, flags);
}
@@ -207,7 +207,7 @@ static struct drm_driver arcpgu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0813c2f06931..48019ae22ddb 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
}
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .mode_fixup = hdlcd_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
- .mode_set_nofb = hdlcd_crtc_mode_set_nofb,
.enable = hdlcd_crtc_enable,
.disable = hdlcd_crtc_disable,
- .prepare = hdlcd_crtc_disable,
- .commit = hdlcd_crtc_enable,
.atomic_check = hdlcd_crtc_atomic_check,
.atomic_begin = hdlcd_crtc_atomic_begin,
- .atomic_flush = hdlcd_crtc_atomic_flush,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a6ca36f0096f..49e586d67595 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -296,7 +296,7 @@ static struct drm_driver hdlcd_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = hdlcd_enable_vblank,
.disable_vblank = hdlcd_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bcdd0..34405e4a5d36 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
/* Handle any pending frame work. */
if (work) {
work->fn(dcrtc, plane, work);
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
}
wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
{
int ret;
- ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ ret = drm_crtc_vblank_get(&dcrtc->crtc);
if (ret) {
DRM_ERROR("failed to acquire vblank counter\n");
return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
if (ret)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return ret;
}
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_plane_work *work = xchg(&plane->work, NULL);
if (work)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return work;
}
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
if (fwork->event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
if (interlaced ^ dcrtc->interlaced) {
if (adj->flags & DRM_MODE_FLAG_INTERLACE)
- drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_get(&dcrtc->crtc);
else
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
dcrtc->interlaced = interlaced;
}
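The reference-counting pattern behind these conversions, in a hedged sketch: the CRTC-based helpers replace the (dev, pipe) pair, while the get/put pairing rules stay the same:

ret = drm_crtc_vblank_get(crtc);
if (ret)
	return ret;	/* vblank interrupts are unavailable */

/* ... queue the flip or frame work ... */

/* Drop the reference on the error path, or once the event fires. */
drm_crtc_vblank_put(crtc);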
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61aa5..cb21c0b6374a 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -197,7 +197,7 @@ static struct drm_driver armada_drm_driver = {
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
#endif
- .gem_free_object = armada_gem_free_object,
+ .gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714836..f54afd2113a9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = ast_gem_free_object,
+ .gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57884..c017a9330a18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
- u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c337922606e3..5957c3e659fe 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
}
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
ast_crtc->lut_r[i] = red[i] >> 8;
ast_crtc->lut_g[i] = green[i] >> 8;
ast_crtc->lut_b[i] = blue[i] >> 8;
}
ast_crtc_load_lut(crtc);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index bd12231ab0cd..613f6c99b76a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (crtc->event) {
- drm_send_vblank_event(dev, crtc->id, crtc->event);
- drm_vblank_put(dev, crtc->id);
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ drm_crtc_vblank_put(&crtc->base);
crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded7645747e..6485fa5bee8b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -776,7 +776,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = atmel_hlcdc_dc_enable_vblank,
.disable_vblank = atmel_hlcdc_dc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b0e2..abace82de6ea 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
.date = "20130925",
.major = 1,
.minor = 0,
- .gem_free_object = bochs_gem_free_object,
+ .gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da6f1..b05f7eae32ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = cirrus_gem_free_object,
+ .gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfcc57..17c915d9a03e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
int i;
- if (size != CIRRUS_LUT_SIZE)
- return;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ for (i = 0; i < size; i++) {
cirrus_crtc->lut_r[i] = red[i];
cirrus_crtc->lut_g[i] = green[i];
cirrus_crtc->lut_b[i] = blue[i];
}
cirrus_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c204ef32df16..5e4b820a977c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -44,11 +44,8 @@
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
kfree(state->connectors);
- kfree(state->connector_states);
kfree(state->crtcs);
- kfree(state->crtc_states);
kfree(state->planes);
- kfree(state->plane_states);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -72,18 +69,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
goto fail;
- state->crtc_states = kcalloc(dev->mode_config.num_crtc,
- sizeof(*state->crtc_states), GFP_KERNEL);
- if (!state->crtc_states)
- goto fail;
state->planes = kcalloc(dev->mode_config.num_total_plane,
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
- state->plane_states = kcalloc(dev->mode_config.num_total_plane,
- sizeof(*state->plane_states), GFP_KERNEL);
- if (!state->plane_states)
- goto fail;
state->dev = dev;
@@ -139,40 +128,40 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
+ struct drm_connector *connector = state->connectors[i].ptr;
if (!connector)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connector_states[i]);
- state->connectors[i] = NULL;
- state->connector_states[i] = NULL;
+ state->connectors[i].state);
+ state->connectors[i].ptr = NULL;
+ state->connectors[i].state = NULL;
drm_connector_unreference(connector);
}
for (i = 0; i < config->num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc *crtc = state->crtcs[i].ptr;
if (!crtc)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtc_states[i]);
- state->crtcs[i] = NULL;
- state->crtc_states[i] = NULL;
+ state->crtcs[i].state);
+ state->crtcs[i].ptr = NULL;
+ state->crtcs[i].state = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
+ struct drm_plane *plane = state->planes[i].ptr;
if (!plane)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->plane_states[i]);
- state->planes[i] = NULL;
- state->plane_states[i] = NULL;
+ state->planes[i].state);
+ state->planes[i].ptr = NULL;
+ state->planes[i].state = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +259,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtc_states[index] = crtc_state;
- state->crtcs[index] = crtc;
+ state->crtcs[index].state = crtc_state;
+ state->crtcs[index].ptr = crtc;
crtc_state->state = state;
DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -632,8 +621,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->plane_states[index] = plane_state;
- state->planes[index] = plane;
+ state->planes[index].state = plane_state;
+ state->planes[index].ptr = plane;
plane_state->state = state;
DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -897,8 +886,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
index = drm_connector_index(connector);
if (index >= state->num_connector) {
- struct drm_connector **c;
- struct drm_connector_state **cs;
+ struct __drm_connnectors_state *c;
int alloc = max(index + 1, config->num_connector);
c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -909,26 +897,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
memset(&state->connectors[state->num_connector], 0,
sizeof(*state->connectors) * (alloc - state->num_connector));
- cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
- if (!cs)
- return ERR_PTR(-ENOMEM);
-
- state->connector_states = cs;
- memset(&state->connector_states[state->num_connector], 0,
- sizeof(*state->connector_states) * (alloc - state->num_connector));
state->num_connector = alloc;
}
- if (state->connector_states[index])
- return state->connector_states[index];
+ if (state->connectors[index].state)
+ return state->connectors[index].state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_reference(connector);
- state->connector_states[index] = connector_state;
- state->connectors[index] = connector;
+ state->connectors[index].state = connector_state;
+ state->connectors[index].ptr = connector;
connector_state->state = state;
DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1432,7 +1413,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+ struct drm_device *dev, struct drm_file *file_priv,
+ struct fence *fence, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
int ret;
@@ -1445,12 +1427,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
+ if (file_priv) {
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ return NULL;
+ }
}
+ e->base.fence = fence;
+
return e;
}
@@ -1690,7 +1677,8 @@ retry:
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *e;
- e = create_vblank_event(dev, file_priv, arg->user_data);
+ e = create_vblank_event(dev, file_priv, NULL,
+ arg->user_data);
if (!e) {
ret = -ENOMEM;
goto out;
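
The parallel object/state arrays are folded here into arrays of small wrapper pairs, accessed as .ptr and .state above. The wrapper definitions live in the headers and are not part of this diff; a rough sketch of what the accesses assume (the CRTC and plane struct names are guesses by analogy with the connector one declared above)::

    struct __drm_connnectors_state {
            struct drm_connector *ptr;
            struct drm_connector_state *state;
    };

    /* Assumed analogues, matching the state->crtcs[i].ptr /
     * state->planes[i].state accesses in the hunks above. */
    struct __drm_crtcs_state {
            struct drm_crtc *ptr;
            struct drm_crtc_state *state;
    };

    struct __drm_planes_state {
            struct drm_plane *ptr;
            struct drm_plane_state *state;
    };
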
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120e39..bb98d74d1a2e 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, conn_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
connector_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
+ if (!crtc_state->enable)
+ continue;
+
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;
@@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
- ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+ ret = funcs->atomic_check(crtc, crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
@@ -1249,16 +1256,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ret, i;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int ret, i, j;
- for (i = 0; i < nplanes; i++) {
+ for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
funcs = plane->helper_private;
@@ -1272,12 +1275,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
return 0;
fail:
- for (i--; i >= 0; i--) {
+ for_each_plane_in_state(state, plane, plane_state, j) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
- if (!plane)
+ if (j >= i)
continue;
funcs = plane->helper_private;
@@ -1564,37 +1565,28 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state)
{
int i;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
-
- if (!connector)
- continue;
-
+ for_each_connector_in_state(state, connector, conn_state, i) {
connector->state->state = state;
- swap(state->connector_states[i], connector->state);
+ swap(state->connectors[i].state, connector->state);
connector->state->state = NULL;
}
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
crtc->state->state = state;
- swap(state->crtc_states[i], crtc->state);
+ swap(state->crtcs[i].state, crtc->state);
crtc->state->state = NULL;
}
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(state, plane, plane_state, i) {
plane->state->state = state;
- swap(state->plane_states[i], plane->state);
+ swap(state->planes[i].state, plane->state);
plane->state->state = NULL;
}
}
@@ -2409,7 +2401,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
- * updates it.
+ * updates it.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2930,16 +2922,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
* @red: red correction table
* @green: green correction table
* @blue: green correction table
- * @start:
* @size: size of the tables
*
* Implements support for legacy gamma correction table for drivers
* that support color management through the DEGAMMA_LUT/GAMMA_LUT
* properties.
*/
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size)
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +2942,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
- return;
+ return -ENOMEM;
blob = drm_property_create_blob(dev,
sizeof(struct drm_color_lut) * size,
@@ -3002,7 +2993,7 @@ retry:
drm_property_unreference_blob(blob);
- return;
+ return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
@@ -3010,7 +3001,7 @@ fail:
drm_atomic_state_free(state);
drm_property_unreference_blob(blob);
- return;
+ return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
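
Callers of the legacy gamma path can now propagate failures: drm_atomic_helper_legacy_gamma_set() returns an int and drops the unused start offset, and the &drm_crtc_funcs ->gamma_set hook changes to match (see the drm_crtc.c and gma500 hunks below). A driver hook under the new prototype would be shaped like this (sketch only, names hypothetical)::

    static int example_crtc_gamma_set(struct drm_crtc *crtc,
                                      u16 *red, u16 *green, u16 *blue,
                                      uint32_t size)
    {
            /* Program the hardware LUT; the tables now always start at
             * index 0, so there is no 'start' parameter anymore. */
            return 0;
    }
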
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404abd0..255543086590 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge:
+ * either connected to it directly, or through an intermediate bridge::
*
* encoder ---> bridge B ---> bridge A
*
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b25c75981bd3..df91dfe506eb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -535,7 +535,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
*
* Cleanup framebuffer. This function is intended to be used from the drivers
* ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
+ * framebuffers embedded into a larger structure.
*
 * Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -692,7 +692,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
- config->num_crtc++;
+ crtc->index = config->num_crtc++;
crtc->primary = primary;
crtc->cursor = cursor;
@@ -722,6 +722,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ /* Note that the crtc_list is considered to be static; should we
+ * remove a drm_crtc at runtime we would have to decrement the
+ * indices of all the drm_crtcs that follow it in the crtc_list.
+ */
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -741,29 +746,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-/**
- * drm_crtc_index - find the index of a registered CRTC
- * @crtc: CRTC to find index for
- *
- * Given a registered CRTC, return the index of that CRTC within a DRM
- * device's list of CRTCs.
- */
-unsigned int drm_crtc_index(struct drm_crtc *crtc)
-{
- unsigned int index = 0;
- struct drm_crtc *tmp;
-
- drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp == crtc)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_crtc_index);
-
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
@@ -1166,7 +1148,7 @@ int drm_encoder_init(struct drm_device *dev,
}
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
+ encoder->index = dev->mode_config.num_encoder++;
out_put:
if (ret)
@@ -1180,29 +1162,6 @@ out_unlock:
EXPORT_SYMBOL(drm_encoder_init);
/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
- unsigned int index = 0;
- struct drm_encoder *tmp;
-
- drm_for_each_encoder(tmp, encoder->dev) {
- if (tmp == encoder)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_encoder_index);
-
-/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
@@ -1212,6 +1171,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ /* Note that the encoder_list is considered to be static; should we
+ * remove a drm_encoder at runtime we would have to decrement the
+ * indices of all the drm_encoders that follow it in the encoder_list.
+ */
+
drm_modeset_lock_all(dev);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
@@ -1300,7 +1264,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->type = type;
list_add_tail(&plane->head, &config->plane_list);
- config->num_total_plane++;
+ plane->index = config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
config->num_overlay_plane++;
@@ -1374,6 +1338,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
BUG_ON(list_empty(&plane->head));
+ /* Note that the plane_list is considered to be static; should we
+ * remove a drm_plane at runtime we would have to decrement the
+ * indices of all the drm_planes that follow it in the plane_list.
+ */
+
list_del(&plane->head);
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1360,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
EXPORT_SYMBOL(drm_plane_cleanup);
/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that CRTC within a DRM
- * device's list of planes.
- */
-unsigned int drm_plane_index(struct drm_plane *plane)
-{
- unsigned int index = 0;
- struct drm_plane *tmp;
-
- drm_for_each_plane(tmp, plane->dev) {
- if (tmp == plane)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_plane_index);
-
-/**
* drm_plane_from_index - find the registered plane at an index
* @dev: DRM device
 * @idx: index of the registered plane to find
@@ -1425,13 +1371,11 @@ struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
struct drm_plane *plane;
- unsigned int i = 0;
- drm_for_each_plane(plane, dev) {
- if (i == idx)
+ drm_for_each_plane(plane, dev)
+ if (idx == plane->index)
return plane;
- i++;
- }
+
return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
@@ -2975,6 +2919,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
return PTR_ERR(fb);
}
+ fb->hot_x = req->hot_x;
+ fb->hot_y = req->hot_y;
} else {
fb = NULL;
}
@@ -5138,6 +5084,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
crtc->gamma_size = gamma_size;
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5147,6 +5096,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
return -ENOMEM;
}
+ r_base = crtc->gamma_store;
+ g_base = r_base + gamma_size;
+ b_base = g_base + gamma_size;
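+ /* Seed the store with an identity LUT: entry i maps to i << 8. */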
+ for (i = 0; i < gamma_size; i++) {
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
@@ -5214,7 +5173,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
out:
drm_modeset_unlock_all(dev);
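
With the index now assigned at init time (crtc->index, encoder->index and plane->index above), the list-walking drm_crtc_index()/drm_encoder_index()/drm_plane_index() helpers can be deleted here. Their replacements are presumably trivial accessors in the headers, which are outside this diff; something like::

    /* Sketch of the O(1) replacement; the real definition lives in the
     * headers and is not part of this patch. */
    static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
    {
            return crtc->index;
    }
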
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed98e0..622f788bff46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
* by commas, search through the list looking for one that
* matches the connector.
*
- * If there's one or more that don't't specify a connector, keep
+ * If there's one or more that doesn't specify a connector, keep
 * the last one found as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5075fae3c4e2..2e7ef0b325e2 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
* will be set up automatically. dirty() is called by
* drm_fb_helper_deferred_io() in process context (struct delayed_work).
*
- * Example fbdev deferred io code:
+ * Example fbdev deferred io code::
*
* static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
* struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs ->fb_create
* callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
/**
* drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
* This function will usually be called from the CRTC callback functions.
*/
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
+ unsigned int plane)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
#ifdef CONFIG_DEBUG_FS
-/*
- * drm_fb_cma_describe() - Helper to dump information about a single
- * CMA framebuffer object
- */
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
/**
* drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
- * in debugfs.
+ * in debugfs.
+ * @m: output file
+ * @arg: private data for the callback
*/
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
#endif
+static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(info->device, vma, info->screen_base,
+ info->fix.smem_start, info->fix.smem_len);
+}
+
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_mmap = drm_fb_cma_mmap,
};
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db60f..ba5aac7276e4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
}
/**
@@ -1042,7 +1042,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- int pindex;
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
@@ -1074,38 +1073,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
!fb_helper->funcs->gamma_get))
return -EINVAL;
- pindex = regno;
+ WARN_ON(fb->bits_per_pixel != 8);
- if (fb->bits_per_pixel == 16) {
- pindex = regno << 3;
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
- if (fb->depth == 16 && regno > 63)
- return -EINVAL;
- if (fb->depth == 15 && regno > 31)
- return -EINVAL;
-
- if (fb->depth == 16) {
- u16 r, g, b;
- int i;
- if (regno < 32) {
- for (i = 0; i < 8; i++)
- fb_helper->funcs->gamma_set(crtc, red,
- green, blue, pindex + i);
- }
-
- fb_helper->funcs->gamma_get(crtc, &r,
- &g, &b,
- pindex >> 1);
-
- for (i = 0; i < 4; i++)
- fb_helper->funcs->gamma_set(crtc, r,
- green, b,
- (pindex >> 1) + i);
- }
- }
-
- if (fb->depth != 16)
- fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
}
@@ -2000,7 +1971,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
my_score++;
connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
+
+ /*
+ * If the DRM device implements atomic hooks and ->best_encoder() is
+ * NULL we fall back to the default drm_atomic_helper_best_encoder()
+ * helper.
+ */
+ if (fb_helper->dev->mode_config.funcs->atomic_commit &&
+ !connector_funcs->best_encoder)
+ encoder = drm_atomic_helper_best_encoder(connector);
+ else
+ encoder = connector_funcs->best_encoder(connector);
+
if (!encoder)
goto out;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb355..64121f567234 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -67,7 +67,7 @@ DEFINE_MUTEX(drm_global_mutex);
* specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure:
+ * following is an example #file_operations structure::
*
 * static const struct file_operations example_drm_fops = {
* .owner = THIS_MODULE,
@@ -368,7 +368,7 @@ static void drm_events_release(struct drm_file *file_priv)
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
list_del(&e->link);
- e->destroy(e);
+ kfree(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -636,7 +636,7 @@ put_back_event:
}
ret += length;
- e->destroy(e);
+ kfree(e);
}
}
mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +713,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
list_add(&p->pending_link, &file_priv->pending_event_list);
p->file_priv = file_priv;
- /* we *could* pass this in as arg, but everyone uses kfree: */
- p->destroy = (void (*) (struct drm_pending_event *)) kfree;
-
return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +775,7 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- p->destroy(p);
+ kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -800,8 +797,13 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
assert_spin_locked(&dev->event_lock);
+ if (e->fence) {
+ fence_signal(e->fence);
+ fence_put(e->fence);
+ }
+
if (!e->file_priv) {
- e->destroy(e);
+ kfree(e);
return;
}
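
With the ->destroy() hook gone, every pending event is freed with plain kfree(), and drm_send_event_locked() additionally signals and drops an optional fence attached to the event. A driver completing a flip would attach the fence and let the core do the signalling; roughly (sketch, assuming the caller already owns a fence reference)::

    static void example_complete_flip(struct drm_device *dev,
                                      struct drm_pending_vblank_event *e,
                                      struct fence *fence)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->event_lock, flags);
            e->base.fence = fence;  /* may be NULL; signalled on delivery */
            drm_send_event_locked(dev, &e->base);
            spin_unlock_irqrestore(&dev->event_lock, flags);
    }
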
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 32156060b9c9..5c19dde1cd31 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding struct_ mutex
+ * Must be called holding &drm_device->struct_mutex.
*
* Frees the object
*/
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c18fe..4dc41ff388f9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
#include <linux/vgaarb.h>
#include <linux/export.h>
-/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, pipe, count) \
- ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
-
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
struct timeval *t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 tslot;
assert_spin_locked(&dev->vblank_time_lock);
vblank->last = last;
- /* All writers hold the spinlock, but readers are serialized by
- * the latching of vblank->count below.
- */
- tslot = vblank->count + vblank_count_inc;
- vblanktimestamp(dev, pipe, tslot) = *t_vblank;
-
- /*
- * vblank timestamp updates are protected on the write side with
- * vblank_time_lock, but on the read side done locklessly using a
- * sequence-lock on the vblank counter. Ensure correct ordering using
- * memory barrriers. We need the barrier both before and also after the
- * counter update to synchronize with the next timestamp write.
- * The read-side barriers for this are in drm_vblank_count_and_time.
- */
- smp_wmb();
+ write_seqlock(&vblank->seqlock);
+ vblank->time = *t_vblank;
vblank->count += vblank_count_inc;
- smp_wmb();
+ write_sequnlock(&vblank->seqlock);
}
-/**
- * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
- * @dev: DRM device
- * @pipe: index of CRTC for which to reset the timestamp
- *
+/*
* Reset the stored timestamp for the current vblank count to correspond
 * to the last vblank that occurred.
*
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
spin_unlock(&dev->vblank_time_lock);
}
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @pipe: counter to update
- *
+/*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
const struct timeval *t_old;
u64 diff_ns;
- t_old = &vblanktimestamp(dev, pipe, vblank->count);
+ t_old = &vblank->time;
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
/*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = 1;
}
- /*
- * FIMXE: Need to replace this hack with proper seqlocks.
- *
- * Restrict the bump of the software vblank counter to a safe maximum
- * value of +1 whenever there is the possibility that concurrent readers
- * of vblank timestamps could be active at the moment, as the current
- * implementation of the timestamp caching and updating is not safe
- * against concurrent readers for calls to store_vblank() with a bump
- * of anything but +1. A bump != 1 would very likely return corrupted
- * timestamps to userspace, because the same slot in the cache could
- * be concurrently written by store_vblank() and read by one of those
- * readers without the read-retry logic detecting the collision.
- *
- * Concurrent readers can exist when we are called from the
- * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
- * irq callers. However, all those calls to us are happening with the
- * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
- * can't increase while we are executing. Therefore a zero refcount at
- * this point is safe for arbitrary counter bumps if we are called
- * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
- * we must also accept a refcount of 1, as whenever we are called from
- * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
- * we must let that one pass through in order to not lose vblank counts
- * during vblank irq off - which would completely defeat the whole
- * point of this routine.
- *
- * Whenever we are called from vblank irq, we have to assume concurrent
- * readers exist or can show up any time during our execution, even if
- * the refcount is currently zero, as vblank irqs are usually only
- * enabled due to the presence of readers, and because when we are called
- * from vblank irq we can't hold the vbl_lock to protect us from sudden
- * bumps in vblank refcount. Therefore also restrict bumps to +1 when
- * called from vblank irq.
- */
- if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
- (flags & DRM_CALLED_FROM_VBLIRQ))) {
- DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
- "refcount %u, vblirq %u\n", pipe, diff,
- atomic_read(&vblank->refcount),
- (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
- diff = 1;
- }
-
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to drm_crtc_vblank_count() but this
+ * function interpolates to handle a race with vblank interrupts.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ u32 vblank;
+ unsigned long flags;
+
+ WARN(!dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
+
+ spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+ drm_update_vblank_count(dev, pipe, 0);
+ vblank = drm_vblank_count(dev, pipe);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+ return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
+
/*
 * Disable vblank irqs on crtc, make sure that the last vblank count
* of hardware and corresponding consistent software vblank counter
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
+ seqlock_init(&vblank->seqlock);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -986,25 +949,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int count = DRM_TIMESTAMP_MAXRETRIES;
- u32 cur_vblank;
+ u32 vblank_count;
+ unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- /*
- * Vblank timestamps are read lockless. To ensure consistency the vblank
- * counter is rechecked and ordering is ensured using memory barriers.
- * This works like a seqlock. The write-side barriers are in store_vblank.
- */
do {
- cur_vblank = vblank->count;
- smp_rmb();
- *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
- smp_rmb();
- } while (cur_vblank != vblank->count && --count > 0);
+ seq = read_seqbegin(&vblank->seqlock);
+ vblank_count = vblank->count;
+ *vblanktime = vblank->time;
+ } while (read_seqretry(&vblank->seqlock, seq));
- return cur_vblank;
+ return vblank_count;
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
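
The open-coded smp_wmb()/smp_rmb() sequence counter is replaced by a real seqlock_t: the writer wraps the timestamp/count update in write_seqlock()/write_sequnlock(), and readers retry with read_seqbegin()/read_seqretry() until they observe a consistent pair. Reduced to its essentials (sketch)::

    static DEFINE_SEQLOCK(lock);
    static u32 count;
    static struct timeval stamp;

    /* Writer, additionally serialized by vblank_time_lock. */
    static void store_sample(const struct timeval *t, u32 inc)
    {
            write_seqlock(&lock);
            stamp = *t;
            count += inc;
            write_sequnlock(&lock);
    }

    /* Lockless reader: loops until count and stamp are consistent. */
    static u32 read_sample(struct timeval *t)
    {
            unsigned int seq;
            u32 c;

            do {
                    seq = read_seqbegin(&lock);
                    c = count;
                    *t = stamp;
            } while (read_seqretry(&lock, seq));

            return c;
    }
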
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a90c..7938ce7ebed8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -983,6 +983,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
+ * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
+ * output signal on the TE signal line when the display module reaches line N
+ * defined by STS[n:0].
+ * @dsi: DSI peripheral device
+ * @param: STS[10:0]
+ * Return: 0 on success or a negative error code on failure
+ */
+int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
+{
+ u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
+ param & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
+
+/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
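
Typical use of the new mipi_dsi_set_tear_scanline() helper from a panel driver, right after enabling the TE line (sketch; the scanline value is illustrative, not from this patch)::

    ret = mipi_dsi_set_tear_scanline(dsi, 320);
    if (ret < 0)
            dev_err(&dsi->dev, "failed to set tear scanline: %d\n", ret);
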
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..cb39f45d6a16 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
- u64 end = node->start + node->size;
+ u64 end;
u64 hole_start;
u64 hole_end;
BUG_ON(node == NULL);
+ end = node->start + node->size;
+
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > node->start || hole_end < end)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e5e6f504d8cc..fc5040ae5f25 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
+ *
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
*
- * Standard GTF parameters:
+ * Standard GTF parameters::
+ *
* M = 600
* C = 40
* K = 128
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03e7b..61146f5b4f56 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
- * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
+ * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
- * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
+ * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
- * The basic usage pattern is to:
+ * The basic usage pattern is to::
*
* drm_modeset_acquire_init(&ctx)
* retry:
@@ -51,6 +51,13 @@
* ... do stuff ...
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
+ *
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
*/
/**
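
Spelled out as driver code, the acquire/backoff pattern from the comment above looks like this (sketch; locks a single CRTC)::

    static int example_lock_crtc(struct drm_crtc *crtc)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(&crtc->mutex, &ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(&ctx);
                    goto retry;
            }

            /* ... touch CRTC state under the lock ... */

            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }
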
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898ff9e..fc51306fe365 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -219,10 +219,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
*
* Note that we make some assumptions about hardware limitations that may not be
* true for all hardware --
- * 1) Primary plane cannot be repositioned.
- * 2) Primary plane cannot be scaled.
- * 3) Primary plane must cover the entire CRTC.
- * 4) Subpixel positioning is not supported.
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ *
* Drivers for hardware that don't have these restrictions can provide their
* own implementation rather than using this helper.
*
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7f7c..a0df377d7d1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
+ struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
- if (!connector->cmdline_mode.specified)
+ cmdline_mode = &connector->cmdline_mode;
+ if (!cmdline_mode->specified)
return 0;
+ /* Only add a GTF mode if we find no matching probed modes */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ /* The probed mode's vrefresh isn't set until later */
+ if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
+ continue;
+ }
+
+ return 0;
+ }
+
mode = drm_mode_create_from_cmdline_mode(connector->dev,
- &connector->cmdline_mode);
+ cmdline_mode);
if (mode == NULL)
return 0;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..f306c8855978 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
+ *
+ * ::
+ *
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
u32 completed_fence;
u32 retired_fence;
wait_queue_head_t fence_event;
- unsigned int fence_context;
+ u64 fence_context;
spinlock_t fence_spinlock;
/* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23b0c..843b21c540b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -267,6 +267,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
{
struct exynos_drm_private *priv = dev->dev_private;
struct exynos_atomic_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int i, ret;
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +290,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0ec1ad961e0d..33727d5d826a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -198,7 +198,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = fsl_dcu_drm_enable_vblank,
.disable_vblank = fsl_dcu_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f44d..5b636bf0b467 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
}
}
-void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
- u32 start, u32 size)
+int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 size)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int i;
- int end = (start + size > 256) ? 256 : start + size;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
gma_crtc->lut_r[i] = red[i] >> 8;
gma_crtc->lut_g[i] = green[i] >> 8;
gma_crtc->lut_b[i] = blue[i] >> 8;
}
gma_crtc_load_lut(crtc);
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f053..e72dd08b701b 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height);
extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
extern void gma_crtc_load_lut(struct drm_crtc *crtc);
-extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, u32 start, u32 size);
+extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 size);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be87e4..7b6c84925098 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc;
int i;
- uint16_t *r_base, *g_base, *b_base;
/* We allocate a extra array of drm_connector pointers
* for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
- r_base = gma_crtc->base.gamma_store;
- g_base = r_base + 256;
- b_base = g_base + 256;
for (i = 0; i < 256; i++) {
gma_crtc->lut_r[i] = i;
gma_crtc->lut_g[i] = i;
gma_crtc->lut_b[i] = i;
- r_base[i] = i << 8;
- g_base[i] = i << 8;
- b_base[i] = i << 8;
gma_crtc->lut_adj[i] = 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fbcca..193657259ee9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -173,7 +173,7 @@ static struct drm_driver kirin_drm_driver = {
.fops = &kirin_drm_fops,
.set_busid = drm_platform_set_busid,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..7e2944406b8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_color.o \
intel_display.o \
+ intel_dpio_phy.o \
intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
dvo_tfp410.o \
intel_crt.o \
intel_ddi.o \
+ intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
intel_dsi.o \
+ intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
intel_dvo.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..d97f28bfa9db 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
- CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->dev))
+ if (!IS_GEN7(engine->i915))
return 0;
switch (engine->id) {
case RCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(engine->dev))
+ if (!USES_PPGTT(engine->i915))
return false;
return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[offset + 1] != 0);
}
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
*
* Return: the current version number of the cmd parser
*/
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
+ struct intel_engine_cs *engine;
+ bool active = false;
+
+ /* If the command parser is not enabled, report 0 - unsupported */
+ for_each_engine(engine, dev_priv) {
+ if (i915_needs_cmd_parser(engine)) {
+ active = true;
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
/*
* Command parser version history
*
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
+ * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
*/
- return 6;
+ return 7;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..e4f2c55d9697 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char get_active_flag(struct drm_i915_gem_object *obj)
+static char get_active_flag(struct drm_i915_gem_object *obj)
{
return obj->active ? '*' : ' ';
}
-static const char get_pin_flag(struct drm_i915_gem_object *obj)
+static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}
-static const char get_tiling_flag(struct drm_i915_gem_object *obj)
+static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static inline const char get_global_flag(struct drm_i915_gem_object *obj)
+static char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
-static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
-static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
-{
- seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, ' ');
-}
-
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
+static int per_file_ctx_stats(int id, void *ptr, void *data)
+{
+ struct i915_gem_context *ctx = ptr;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
+ if (ctx->engine[n].state)
+ per_file_stats(0, ctx->engine[n].state, data);
+ if (ctx->engine[n].ringbuf)
+ per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+ }
+
+ return 0;
+}
+
+static void print_context_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct file_stats stats;
+ struct drm_file *file;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ if (dev_priv->kernel_context)
+ per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+
+ list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+ }
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ print_file_stats(m, "[k]contexts", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
-
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->filelist_mutex);
+ print_context_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = crtc->unpin_work;
+ work = crtc->flip_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
+ u32 pending;
u32 addr;
- if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pending = atomic_read(&work->pending);
+ if (pending) {
+ seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -638,11 +669,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_crtc_vblank_count(&crtc->base));
- if (work->enable_stall_check)
- seq_puts(m, "Stall check enabled, ");
- else
- seq_puts(m, "Stall check waiting for page flip ioctl, ");
+ intel_crtc_get_vblank_counter(crtc));
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4)
@@ -1281,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+ seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1383,7 +1411,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seqno[id] = engine->get_seqno(engine);
}
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv);
@@ -1991,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- struct intel_context *ctx;
- enum intel_engine_id id;
+ struct i915_gem_context *ctx;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (!i915.enable_execlists &&
- ctx->legacy_hw_ctx.rcs_state == NULL)
- continue;
-
- seq_puts(m, "HW context ");
- describe_ctx(m, ctx);
- if (ctx == dev_priv->kernel_context)
- seq_printf(m, "(kernel context) ");
+ seq_printf(m, "HW context %u ", ctx->hw_id);
+ if (IS_ERR(ctx->file_priv)) {
+ seq_puts(m, "(deleted) ");
+ } else if (ctx->file_priv) {
+ struct pid *pid = ctx->file_priv->file->pid;
+ struct task_struct *task;
- if (i915.enable_execlists) {
- seq_putc(m, '\n');
- for_each_engine_id(engine, dev_priv, id) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[id].ringbuf;
-
- seq_printf(m, "%s: ", engine->name);
- if (ctx_obj)
- describe_obj(m, ctx_obj);
- if (ringbuf)
- describe_ctx_ringbuf(m, ringbuf);
- seq_putc(m, '\n');
+ task = get_pid_task(pid, PIDTYPE_PID);
+ if (task) {
+ seq_printf(m, "(%s [%d]) ",
+ task->comm, task->pid);
+ put_task_struct(task);
}
} else {
- describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+ seq_puts(m, "(kernel) ");
+ }
+
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, '\n');
+
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ seq_printf(m, "%s: ", engine->name);
+ seq_putc(m, ce->initialised ? 'I' : 'i');
+ if (ce->state)
+ describe_obj(m, ce->state);
+ if (ce->ringbuf)
+ describe_ctx_ringbuf(m, ce->ringbuf);
+ seq_putc(m, '\n');
}
seq_putc(m, '\n');
@@ -2037,24 +2068,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
static void i915_dump_lrc_obj(struct seq_file *m,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct page *page;
uint32_t *reg_state;
int j;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0;
+ seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
+
if (ctx_obj == NULL) {
- seq_printf(m, "Context on %s with no gem object\n",
- engine->name);
+ seq_puts(m, "\tNot allocated\n");
return;
}
- seq_printf(m, "CONTEXT: %s %u\n", engine->name,
- intel_execlists_ctx_id(ctx, engine));
-
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
else
@@ -2087,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!i915.enable_execlists) {
@@ -2100,9 +2129,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- if (ctx != dev_priv->kernel_context)
- for_each_engine(engine, dev_priv)
- i915_dump_lrc_obj(m, ctx, engine);
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2173,8 +2201,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
- seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, engine));
+ seq_printf(m, "\tHead request context: %u\n",
+ head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2268,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int per_file_ctx(int id, void *ptr, void *data)
{
- struct intel_context *ctx = ptr;
+ struct i915_gem_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2313,12 +2341,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) {
seq_printf(m, "%s\n", engine->name);
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2509,6 +2537,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
+ seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
@@ -3168,7 +3197,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j, ret;
- if (!i915_semaphore_is_enabled(dev)) {
+ if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -4769,7 +4798,7 @@ i915_wedged_set(void *data, u64 val)
intel_runtime_pm_get(dev_priv);
- i915_handle_error(dev, val,
+ i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
@@ -4919,7 +4948,7 @@ i915_drop_caches_set(void *data, u64 val)
}
if (val & (DROP_RETIRE | DROP_ACTIVE))
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4993,7 +5022,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5060,7 +5089,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b3198fcd0536..07edaed9d5a2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915_semaphore_is_enabled(dev);
+ value = i915_semaphore_is_enabled(dev_priv);
break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_CMD_PARSER_VERSION:
- value = i915_cmd_parser_get_version();
+ value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
- value = i915.enable_hangcheck &&
- intel_has_gpu_reset(dev);
+ value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev);
@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
+static void i915_gem_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+ * Neither the BIOS, ourselves nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload. Afterwards we then clean up the
+ * GEM state tracking, flushing off the requests and leaving the
+ * system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_reset(dev);
+ i915_gem_cleanup_engines(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_client;
+ /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+ intel_update_rawclk(dev_priv);
+
intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
@@ -468,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
- intel_guc_ucode_init(dev);
+ intel_guc_init(dev);
ret = i915_gem_init(dev);
if (ret)
@@ -503,12 +542,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_fini(dev);
cleanup_irq:
- intel_guc_ucode_fini(dev);
+ intel_guc_fini(dev);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev);
cleanup_csr:
@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (info->num_pipes > 0 &&
- (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+ (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+ } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev);
- /* Snooping is broken on BXT A stepping. */
info->has_snoop = !info->has_llc;
- info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
+ /* Snooping is broken on BXT A stepping. */
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ info->has_snoop = false;
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n",
info->has_eu_pg ? "y" : "n");
+
+ i915.enable_execlists =
+ intel_sanitize_enable_execlists(dev_priv,
+ i915.enable_execlists);
+
+ /*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+ i915.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device;
+ BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ device_info->gen_mask = BIT(device_info->gen - 1);
+
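
The gen_mask derivation above turns the integer gen into a one-hot bit so that later checks become single-bit tests (see the IS_GENx macros further down in i915_drv.h). A minimal standalone sketch of the scheme, assuming a 16-bit mask as in this patch; the program below is illustrative only:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            uint8_t gen = 9;            /* e.g. a Skylake part */
            uint16_t gen_mask;

            /* Mirrors: BUG_ON(gen > sizeof(gen_mask) * BITS_PER_BYTE) */
            assert(gen <= 16);

            /* Mirrors: device_info->gen_mask = BIT(device_info->gen - 1) */
            gen_mask = BIT(gen - 1);    /* 0x0100 for gen 9 */

            /* Mirrors IS_GEN9(): a single AND instead of an integer compare */
            assert(gen_mask & BIT(8));
            return 0;
    }
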
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (ret < 0)
goto put_bridge;
- intel_uncore_init(dev);
+ intel_uncore_init(dev_priv);
return 0;
@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- intel_uncore_fini(dev);
+ intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ if (IS_GEN2(dev)) {
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
+
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
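
i915_gem_context_lookup() replaces the old i915_gem_context_get() and reports a missing id with ERR_PTR(-ENOENT) instead of NULL, so callers must switch to the IS_ERR()/PTR_ERR() idiom. A hedged sketch of a call site, assuming struct_mutex is already held (the function name is illustrative, not part of the patch):

    /* Illustrative only: resolve a userspace context id to a context. */
    static int example_with_context(struct drm_i915_file_private *file_priv,
                                    u32 id)
    {
            struct i915_gem_context *ctx;

            ctx = i915_gem_context_lookup(file_priv, id);
            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);    /* -ENOENT for a stale id */

            i915_gem_context_reference(ctx);
            /* ... operate on ctx under struct_mutex ... */
            i915_gem_context_unreference(ctx);
            return 0;
    }
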
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
aperture_size = ggtt->mappable_end;
@@ -1236,9 +1305,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- intel_uncore_sanitize(dev);
+ intel_uncore_sanitize(dev_priv);
- intel_opregion_setup(dev);
+ intel_opregion_setup(dev_priv);
i915_gem_load_init_fences(dev_priv);
@@ -1300,14 +1369,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* Notify a valid surface after modesetting,
* when running inside a VM.
*/
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
i915_setup_sysfs(dev);
if (INTEL_INFO(dev_priv)->num_pipes) {
/* Must be done after probing outputs */
- intel_opregion_init(dev);
+ intel_opregion_register(dev_priv);
acpi_video_register();
}
@@ -1326,7 +1395,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_audio_component_cleanup(dev_priv);
intel_gpu_ips_teardown();
acpi_video_unregister();
- intel_opregion_fini(dev_priv->dev);
+ intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv->dev);
i915_gem_shrinker_cleanup(dev_priv);
}
@@ -1458,11 +1527,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- intel_guc_ucode_fini(dev);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
+ intel_guc_fini(dev);
+ i915_gem_fini(dev);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f313b4d8344f..872c60608dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,11 +35,9 @@
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
@@ -300,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
static const struct intel_device_info intel_broadwell_d_info = {
BDW_FEATURES,
.gen = 8,
+ .is_broadwell = 1,
};
static const struct intel_device_info intel_broadwell_m_info = {
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
BDW_FEATURES,
.gen = 8,
+ .is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@@ -530,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch);
}
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return false;
if (i915.semaphores >= 0)
@@ -542,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.enable_execlists)
return false;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false;
#endif
@@ -610,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev);
@@ -628,10 +626,10 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev, opregion_target_state);
+ intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_forcewake_reset(dev, false);
- intel_opregion_fini(dev);
+ intel_uncore_forcewake_reset(dev_priv, false);
+ intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -749,7 +747,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
- intel_opregion_setup(dev);
+ intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
@@ -777,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
@@ -794,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
- intel_opregion_init(dev);
+ intel_opregion_register(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
@@ -802,7 +800,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
drm_kms_helper_poll_enable(dev);
@@ -870,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_early_sanitize(dev, true);
+ intel_uncore_early_sanitize(dev_priv, true);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -880,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv);
}
- intel_uncore_sanitize(dev);
+ intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -923,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
- intel_reset_gt_powersave(dev);
+ intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
@@ -946,7 +944,7 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev);
- ret = intel_gpu_reset(dev, ALL_ENGINES);
+ ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (error->stop_rings != 0) {
@@ -1001,7 +999,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
return 0;
@@ -1030,13 +1028,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
return drm_get_pci_dev(pdev, ent, &driver);
@@ -1115,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
+/* freeze: called before creating the hibernation image. */
+static int i915_pm_freeze(struct device *dev)
+{
+ return i915_pm_suspend(dev);
+}
+
+static int i915_pm_freeze_late(struct device *dev)
+{
+ int ret;
+
+ ret = i915_pm_suspend_late(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_freeze_late(dev_to_i915(dev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_restore(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1478,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+ if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
@@ -1539,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev_priv, false);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1553,14 +1588,14 @@ static int intel_runtime_suspend(struct device *device)
* FIXME: We really should find a document that references the arguments
* used below!
*/
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
- intel_opregion_notify_adapter(dev, PCI_D3hot);
+ intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
@@ -1569,7 +1604,7 @@ static int intel_runtime_suspend(struct device *device)
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
- intel_opregion_notify_adapter(dev, PCI_D1);
+ intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
assert_forcewakes_inactive(dev_priv);
@@ -1593,7 +1628,7 @@ static int intel_runtime_resume(struct device *device)
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1620,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
- gen6_update_ring_freq(dev);
+ gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1632,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1669,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
- .freeze = i915_pm_suspend,
- .freeze_late = i915_pm_suspend_late,
- .thaw_early = i915_pm_resume_early,
- .thaw = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
- .restore_early = i915_pm_resume_early,
- .restore = i915_pm_resume,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5faacc6e548d..0113207967d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -66,7 +66,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160425"
+#define DRIVER_DATE "20160606"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -324,6 +324,12 @@ struct i915_hotplug {
&dev->mode_config.plane_list, \
base.head)
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
+ base.head) \
+ for_each_if ((plane_mask) & \
+ (1 << drm_plane_index(&intel_plane->base)))
+
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
@@ -333,6 +339,10 @@ struct i915_hotplug {
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
+ for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
+
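
The two _mask variants filter the plain list walks with a bitmask keyed by drm_plane_index()/drm_crtc_index(), which matches how atomic state tracks affected objects. A hedged usage sketch (the function and the source of its mask are illustrative, not part of the patch):

    /* Illustrative only: visit just the CRTCs named in crtc_mask. */
    static void example_log_crtcs(struct drm_device *dev, u32 crtc_mask)
    {
            struct intel_crtc *intel_crtc;

            for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)
                    DRM_DEBUG_KMS("crtc %u selected\n",
                                  drm_crtc_index(&intel_crtc->base));
    }
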
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
@@ -588,6 +598,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +623,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
- void (*hpd_irq_setup)(struct drm_device *dev);
+ void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -735,6 +746,7 @@ struct intel_csr {
func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \
+ func(is_broadwell) sep \
func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
@@ -757,9 +769,10 @@ struct intel_csr {
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
- u8 num_pipes:3;
+ u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
+ u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
/* Register offsets for the various display pipes and transcoders */
@@ -821,9 +834,8 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
-#define CONTEXT_NO_ZEROMAP (1<<0)
/**
- * struct intel_context - as the name implies, represents a context.
+ * struct i915_gem_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
@@ -841,33 +853,33 @@ struct i915_ctx_hang_stats {
* Contexts are memory images used by the hardware to store copies of their
* internal state.
*/
-struct intel_context {
+struct i915_gem_context {
struct kref ref;
- int user_handle;
- uint8_t remap_slice;
struct drm_i915_private *i915;
- int flags;
struct drm_i915_file_private *file_priv;
- struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
- /* Legacy ring buffer submission */
- struct {
- struct drm_i915_gem_object *rcs_state;
- bool initialized;
- } legacy_hw_ctx;
+ struct i915_ctx_hang_stats hang_stats;
- /* Execlists */
- struct {
+ unsigned long flags;
+#define CONTEXT_NO_ZEROMAP (1<<0)
+
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned hw_id;
+ u32 user_handle;
+
+ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int pin_count;
struct i915_vma *lrc_vma;
- u64 lrc_desc;
uint32_t *lrc_reg_state;
+ u64 lrc_desc;
+ int pin_count;
+ bool initialised;
} engine[I915_NUM_ENGINES];
struct list_head link;
+
+ u8 remap_slice;
};
enum fb_op_origin {
@@ -1115,6 +1127,8 @@ struct intel_gen6_power_mgmt {
bool interrupts_enabled;
u32 pm_iir;
+ u32 pm_intr_keep;
+
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
* Soft limits are those which are used for the dynamic reclocking done
@@ -1488,6 +1502,7 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ enum intel_backlight_type type;
} backlight;
/* MIPI DSI */
@@ -1580,7 +1595,7 @@ struct skl_ddb_allocation {
};
struct skl_wm_values {
- bool dirty[I915_MAX_PIPES];
+ unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1697,7 +1712,7 @@ struct i915_execbuffer_params {
uint64_t batch_obj_vm_offset;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct drm_i915_gem_request *request;
};
@@ -1747,6 +1762,7 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
+ struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
@@ -1802,13 +1818,17 @@ struct drm_i915_private {
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_boot_cdclk;
+ unsigned int skl_preferred_vco_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
+ struct {
+ unsigned int vco, ref;
+ } cdclk_pll;
+
/**
* wq - Driver workqueue for GEM.
*
@@ -1838,6 +1858,13 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ /* The hw wants to have a stable context identifier for the lifetime
+ * of the context (for OA, PASID, faults, etc). This is limited
+ * in execlists to 21 bits.
+ */
+ struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+
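
A hedged sketch of how a context could draw its hw_id from this ida at creation time, following the standard ida_simple_get() pattern; the allocation site itself is not part of this hunk and the helper name is illustrative:

    /* Illustrative only: allocate a stable hw id below the execlists limit. */
    static int example_assign_hw_id(struct drm_i915_private *dev_priv,
                                    struct i915_gem_context *ctx)
    {
            int ret;

            ret = ida_simple_get(&dev_priv->context_hw_ida,
                                 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
            if (ret < 0)
                    return ret;

            ctx->hw_id = ret;
            return 0;
    }
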
/* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1950,9 +1977,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /* Committed wm config */
- struct intel_wm_config config;
-
/*
* The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store
@@ -1975,6 +1999,13 @@ struct drm_i915_private {
* cstate->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
+
+ /*
+ * Set during HW readout of watermarks/DDB. Some platforms
+ * need to know when we're still using BIOS-provided values
+ * (which we don't fully trust).
+ */
+ bool distrust_bios_wm;
} wm;
struct i915_runtime_pm pm;
@@ -1989,8 +2020,6 @@ struct drm_i915_private {
void (*stop_engine)(struct intel_engine_cs *engine);
} gt;
- struct intel_context *kernel_context;
-
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2227,9 +2256,75 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+ struct scatterlist *sgp;
+ union {
+ unsigned long pfn;
+ dma_addr_t dma;
+ };
+ unsigned int curr;
+ unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+ struct sgt_iter s = { .sgp = sgl };
+
+ if (s.sgp) {
+ s.max = s.curr = s.sgp->offset;
+ s.max += s.sgp->length;
+ if (dma)
+ s.dma = sg_dma_address(s.sgp);
+ else
+ s.pfn = page_to_pfn(sg_page(s.sgp));
+ }
+
+ return s;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * If the entry is the last, return NULL; otherwise, step to the next
+ * element in the array (@sg@+1). If that's a chain pointer, follow it;
+ * otherwise just return the pointer to the current element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ return sg_is_last(sg) ? NULL :
+ likely(!sg_is_chain(++sg)) ? sg :
+ sg_chain_ptr(sg);
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap: DMA address (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
+ ((__dmap) = (__iter).dma + (__iter).curr); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp: page pointer (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
+ ((__pp) = (__iter).pfn == 0 ? NULL : \
+ pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
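
Together, __sgt_iter() and the two macros above replace the heavier for_each_sg_page() in the common page and DMA walks (the i915_gem.c hunks below convert the callers). A hedged usage sketch:

    /* Illustrative only: walk an object's backing store both ways. */
    static void example_walk(struct drm_i915_gem_object *obj)
    {
            struct sgt_iter iter;
            struct page *page;
            dma_addr_t dma;

            /* One struct page per iteration. */
            for_each_sgt_page(page, iter, obj->pages)
                    set_page_dirty(page);

            /* One PAGE_SIZE-aligned DMA address per iteration. */
            for_each_sgt_dma(dma, iter, obj->pages)
                    (void)dma;
    }
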
/**
* Request queue structure.
@@ -2278,6 +2373,9 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
+ /** Preallocate space in the ringbuffer for emitting the request */
+ u32 reserved_space;
+
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
@@ -2288,9 +2386,20 @@ struct drm_i915_gem_request {
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct intel_ringbuffer *ringbuf;
+ /**
+ * Context related to the previous request.
+ * As the contexts are accessed by the hardware until the switch is
+ * completed to a new context, the hardware may still be writing
+ * to the context object after the breadcrumb is visible. We must
+ * not unpin/unbind/prune that object whilst still active and so
+ * we keep the previous context pinned until the following (this)
+ * request is retired.
+ */
+ struct i915_gem_context *previous_context;
+
/** Batch buffer related to this request if any (used for
error state dump only) */
struct drm_i915_gem_object *batch_obj;
@@ -2327,11 +2436,13 @@ struct drm_i915_gem_request {
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
+ /** Execlists context hardware id. */
+ unsigned ctx_hw_id;
};
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx);
+ struct i915_gem_context *ctx);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2359,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
- struct drm_device *dev;
-
- if (!req)
- return;
-
- dev = req->engine->dev;
- if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
-}
-
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
@@ -2503,9 +2600,29 @@ struct drm_i915_cmd_table {
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff
+#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for an unbound start and/or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+ unsigned int __s = (s), __e = (e); \
+ BUILD_BUG_ON(!__builtin_constant_p(s)); \
+ BUILD_BUG_ON(!__builtin_constant_p(e)); \
+ if ((__s) != GEN_FOREVER) \
+ __s = (s) - 1; \
+ if ((__e) == GEN_FOREVER) \
+ __e = BITS_PER_LONG - 1; \
+ else \
+ __e = (e) - 1; \
+ !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
+
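
With gen_mask one-hot at bit (gen - 1), the range test reduces to ANDing against a GENMASK over the shifted bounds; GEN_FOREVER (0) leaves the lower bound at bit 0 or pushes the upper bound to BITS_PER_LONG - 1. A minimal userspace sketch of the arithmetic, assuming 64-bit longs:

    #include <assert.h>

    #define GENMASK(h, l) (((~0ul) << (l)) & (~0ul >> (63 - (h))))

    int main(void)
    {
            unsigned long gen_mask = 1ul << (7 - 1);   /* a gen 7 device */

            assert(gen_mask & GENMASK(6, 5));          /* IS_GEN(p, 6, 7): true */
            assert(!(gen_mask & GENMASK(4, 0)));       /* IS_GEN(p, GEN_FOREVER, 5): false */
            assert(!(gen_mask & GENMASK(63, 7)));      /* IS_GEN(p, 8, GEN_FOREVER): false */
            return 0;
    }
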
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -2538,7 +2655,7 @@ struct drm_i915_cmd_table {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2606,14 +2723,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
-#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1))
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2))
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3))
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4))
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5))
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6))
+#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7))
+#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8))
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
@@ -2686,12 +2803,18 @@ struct drm_i915_cmd_table {
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev))
-#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
-#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+/*
+ * For now, anything with a GuC requires uCode loading, and then supports
+ * command submission once loaded. But these are logically independent
+ * properties, so we have separate macros to test them.
+ */
+#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
+#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
@@ -2740,6 +2863,9 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt);
+
/* i915_dma.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -2760,9 +2886,9 @@ extern void i915_driver_postclose(struct drm_device *dev,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2772,30 +2898,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
/* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev,
+extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake);
-extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_device *dev);
-extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -2811,9 +2940,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
- return to_i915(dev)->vgpu.active;
+ return dev_priv->vgpu.active;
}
void
@@ -2909,7 +3038,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2919,11 +3048,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
@@ -3054,6 +3185,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -3081,13 +3217,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
req->seqno);
}
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3147,7 +3283,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -3215,8 +3350,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
@@ -3251,14 +3384,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3272,12 +3399,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
alignment, flags | PIN_GLOBAL);
}
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
- return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
static inline void
@@ -3301,28 +3422,42 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_gem_context *ctx;
+
+ lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+
+ ctx = idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
+}
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
{
kref_get(&ctx->ref);
}
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
{
+ lockdep_assert_held(&ctx->i915->dev->struct_mutex);
kref_put(&ctx->ref, i915_gem_context_free);
}
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
@@ -3335,6 +3470,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3349,9 +3486,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
@@ -3430,18 +3567,19 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3489,31 +3627,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
/* intel_opregion.c */
#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_register(struct drm_i915_private *dev) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
return 0;
}
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
return -ENODEV;
}
@@ -3538,26 +3678,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
@@ -3586,6 +3725,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3..343d88114f3b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
}
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
@@ -1006,7 +1006,7 @@ out:
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
else
obj->cache_dirty = true;
@@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct intel_rps_client *rps)
{
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
+ if (request->previous_context) {
+ if (i915.enable_execlists)
+ intel_lr_context_unpin(request->previous_context,
+ request->engine);
+ }
+
+ i915_gem_context_unreference(request->ctx);
i915_gem_request_unreference(request);
}
@@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
- lockdep_assert_held(&engine->dev->struct_mutex);
+ lockdep_assert_held(&engine->i915->dev->struct_mutex);
if (list_empty(&req->list))
return;
@@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
return size;
/* Previous chips need a power-of-two fence region when tiling */
- if (INTEL_INFO(dev)->gen == 3)
+ if (IS_GEN3(dev))
gtt_size = 1024*1024;
else
gtt_size = 512*1024;
@@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
@@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- put_page(sg_page_iter_page(&sg_iter));
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
sg_free_table(st);
kfree(st);
@@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+ if (pages != stack_pages)
+ drm_free_large(pages);
+
+ return addr;
+}
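
The new i915_gem_object_map() above keeps the common case allocation-free: up to 32 pages are gathered into a stack array, and only larger objects pay for a temporary heap array from drm_malloc_gfp(). A minimal user-space sketch of the same stack-or-heap idiom (gather() and its item type are hypothetical stand-ins, not kernel code):

#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Collect n items, using a small stack array when possible and
 * falling back to the heap only for large n -- the same shape as
 * the page-array handling in i915_gem_object_map(). */
static int gather(unsigned long n)
{
	int stack_items[32];
	int *items = stack_items;
	unsigned long i;

	if (n > ARRAY_SIZE(stack_items)) {
		items = malloc(n * sizeof(*items));
		if (!items)
			return -1;
	}

	for (i = 0; i < n; i++)
		items[i] = (int)i;	/* stand-in for collecting pages */

	/* ... consume items[0..n-1] here ... */

	if (items != stack_items)	/* free only the heap fallback */
		free(items);
	return 0;
}

int main(void)
{
	return gather(8) || gather(1024);
}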
+
+/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
@@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj);
- if (obj->mapping == NULL) {
- struct page **pages;
-
- pages = NULL;
- if (obj->base.size == PAGE_SIZE)
- obj->mapping = kmap(sg_page(obj->pages->sgl));
- else
- pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
- sizeof(*pages),
- GFP_TEMPORARY);
- if (pages != NULL) {
- struct sg_page_iter sg_iter;
- int n;
-
- n = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter,
- obj->pages->nents, 0)
- pages[n++] = sg_page_iter_page(&sg_iter);
-
- obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
- drm_free_large(pages);
- }
- if (obj->mapping == NULL) {
+ if (!obj->mapping) {
+ obj->mapping = i915_gem_object_map(obj);
+ if (!obj->mapping) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);
}
@@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
}
static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
@@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
if (ret)
return ret;
}
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
@@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev, seqno - 1);
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
@@ -2550,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
}
int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev, 0);
+ int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
@@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
+ u32 reserved_tail;
int ret;
if (WARN_ON(request == NULL))
@@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- intel_ring_reserved_space_use(ringbuf);
-
request_start = intel_ring_get_tail(ringbuf);
+ reserved_tail = request->reserved_space;
+ request->reserved_space = 0;
+
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2652,19 +2674,25 @@ void __i915_add_request(struct drm_i915_gem_request *request,
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
- i915_queue_hangcheck(engine->dev);
+ i915_queue_hangcheck(engine->i915);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
+ intel_mark_busy(dev_priv);
/* Sanity check that the reserved size was large enough. */
- intel_ring_reserved_space_end(ringbuf);
+ ret = intel_ring_get_tail(ringbuf) - request_start;
+ if (ret < 0)
+ ret += ringbuf->size;
+ WARN_ONCE(ret > reserved_tail,
+ "Not enough space reserved (%d bytes) "
+ "for adding the request (%d bytes)\n",
+ reserved_tail, ret);
}
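
The replacement for intel_ring_reserved_space_end() above computes how many bytes the request actually consumed by subtracting two ring-tail positions modulo the ring size. The wraparound arithmetic in isolation, as a runnable sketch (ring_space_used() is an illustrative helper, not a kernel function):

#include <assert.h>

/* Bytes consumed between two tail positions in a circular ring of
 * the given size -- the same modular arithmetic as the reserved-space
 * check in __i915_add_request(). */
static int ring_space_used(int start, int tail, int size)
{
	int used = tail - start;

	if (used < 0)		/* the tail wrapped past the end */
		used += size;
	return used;
}

int main(void)
{
	assert(ring_space_used(100, 160, 4096) == 60);
	assert(ring_space_used(4000, 64, 4096) == 160);	/* wrap case */
	return 0;
}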
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct intel_context *ctx)
+ const struct i915_gem_context *ctx)
{
unsigned long elapsed;
@@ -2689,7 +2717,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;
@@ -2712,27 +2740,15 @@ void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref);
- struct intel_context *ctx = req->ctx;
-
- if (req->file_priv)
- i915_gem_request_remove_from_client(req);
-
- if (ctx) {
- if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->engine);
-
- i915_gem_context_unreference(ctx);
- }
-
kmem_cache_free(req->i915->requests, req);
}
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret)
goto err;
@@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret) {
- i915_gem_context_unreference(req->ctx);
- goto err;
- }
-
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
@@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
+ req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
if (i915.enable_execlists)
- ret = intel_logical_ring_reserve_space(req);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
- ret = intel_ring_reserve_space(req);
- if (ret) {
- /*
- * At this point, the request is fully allocated even if not
- * fully prepared. Thus it can be cleaned up using the proper
- * free code.
- */
- intel_ring_reserved_space_cancel(req->ringbuf);
- i915_gem_request_unreference(req);
- return ret;
- }
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
*req_out = req;
return 0;
+err_ctx:
+ i915_gem_context_unreference(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
return ret;
@@ -2818,13 +2821,13 @@ err:
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
if (ctx == NULL)
- ctx = to_i915(engine->dev)->kernel_context;
+ ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
@@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
/* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet);
- spin_lock_bh(&engine->execlist_lock);
- /* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&engine->execlist_queue,
- &engine->execlist_retired_req_list);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
+ intel_execlists_cancel_requests(engine);
}
/*
@@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
}
bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
bool idle = true;
@@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev)
spin_lock_bh(&engine->execlist_lock);
idle &= list_empty(&engine->execlist_queue);
spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
}
}
@@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
- idle = i915_gem_retire_requests(dev);
+ idle = i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
if (!idle)
@@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Also locking seems to be fubar here, engine->request_list is protected
* by dev->struct_mutex. */
- intel_mark_idle(dev);
+ intel_mark_idle(dev_priv);
if (mutex_trylock(&dev->struct_mutex)) {
for_each_engine(engine, dev_priv)
@@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
- goto retire;
-
- if (i915_gem_request_completed(req, true)) {
- __i915_gem_request_retire__upto(req);
-retire:
+ if (i915_gem_request_completed(req, true))
i915_gem_object_retire__read(obj, i);
- }
}
return 0;
@@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference__unlocked(req[i]);
+ i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (i915_gem_request_completed(from_req, true))
return 0;
- if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915->mm.interruptible,
@@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pin_count);
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
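
__i915_vma_iounmap() follows the usual guard-unmap-NULL teardown shape, so calling it on an already-unmapped vma is a harmless no-op. The pattern reduced to a self-contained sketch (struct widget and free() stand in for the vma and io_mapping_unmap()):

#include <stdlib.h>

struct widget { void *map; };

static void widget_unmap(struct widget *w)
{
	if (!w->map)	/* already unmapped: nothing to do */
		return;

	free(w->map);	/* stand-in for io_mapping_unmap() */
	w->map = NULL;	/* make repeated calls harmless */
}

int main(void)
{
	struct widget w = { malloc(16) };

	widget_unmap(&w);
	widget_unmap(&w);	/* safe: idempotent */
	return 0;
}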
+
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
+
+ __i915_vma_iounmap(vma);
}
trace_i915_vma_unbind(vma);
@@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3929,7 +3930,7 @@ out:
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
}
return 0;
@@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- i915_gem_request_unreference__unlocked(target);
+ i915_gem_request_unreference(target);
return ret;
}
@@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
+ int ret;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- i915_gem_object_free(obj);
- return NULL;
- }
+ ret = drm_gem_object_init(dev, &obj->base, size);
+ if (ret)
+ goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
trace_i915_gem_object_create(obj);
return obj;
+
+fail:
+ i915_gem_object_free(obj);
+
+ return ERR_PTR(ret);
}
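
i915_gem_object_create() now reports failure through ERR_PTR() instead of NULL and unwinds through a single fail label. A user-space sketch of that errno-pointer convention (ERR_PTR/PTR_ERR are re-implemented here only for the demo; object_create() is hypothetical):

#include <errno.h>
#include <stdlib.h>

/* Minimal user-space stand-ins for the kernel's ERR_PTR()/PTR_ERR():
 * small negative errno values are smuggled through a pointer. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }

struct object { int payload; };

static struct object *object_create(int want_fail)
{
	struct object *obj;
	int ret;

	obj = malloc(sizeof(*obj));
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = want_fail ? -EINVAL : 0;	/* stand-in for drm_gem_object_init() */
	if (ret)
		goto fail;		/* single unwind path, as in the patch */

	obj->payload = 42;
	return obj;

fail:
	free(obj);
	return ERR_PTR(ret);
}

int main(void)
{
	struct object *ok = object_create(0);
	struct object *bad = object_create(1);

	if (PTR_ERR(bad) != -EINVAL)
		return 1;
	free(ok);
	return 0;
}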
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- BUG_ON(!view);
+ GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
@@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
if (ret)
goto err;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev);
+ i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4727,37 +4730,6 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
- int i, ret;
-
- if (!HAS_L3_DPF(dev) || !remap_info)
- return 0;
-
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
- if (ret)
- return ret;
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
- intel_ring_emit(engine, remap_info[i]);
- }
-
- intel_ring_advance(engine);
-
- return ret;
-}
-
void i915_gem_init_swizzling(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- int ret, j;
+ int ret;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,13 +4886,10 @@ i915_gem_init_hw(struct drm_device *dev)
intel_mocs_init_l3cc_table(dev);
/* We can't enable contexts until all firmware is loaded */
- if (HAS_GUC_UCODE(dev)) {
- ret = intel_guc_ucode_load(dev);
- if (ret) {
- DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
- ret = -EIO;
+ if (HAS_GUC(dev)) {
+ ret = intel_guc_setup(dev);
+ if (ret)
goto out;
- }
}
/*
@@ -4928,44 +4897,6 @@ i915_gem_init_hw(struct drm_device *dev)
* on re-initialisation
*/
ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
- if (ret)
- goto out;
-
- /* Now it is safe to go back round and do everything else: */
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
-
- if (engine->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++) {
- ret = i915_gem_l3_remap(req, j);
- if (ret)
- goto err_request;
- }
- }
-
- ret = i915_ppgtt_init_ring(req);
- if (ret)
- goto err_request;
-
- ret = i915_gem_context_enable(req);
- if (ret)
- goto err_request;
-
-err_request:
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("Failed to enable %s, error=%d\n",
- engine->name, ret);
- i915_gem_cleanup_engines(dev);
- break;
- }
- }
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4977,9 +4908,6 @@ int i915_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- i915.enable_execlists = intel_sanitize_enable_execlists(dev,
- i915.enable_execlists);
-
mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) {
@@ -5002,10 +4930,7 @@ int i915_gem_init(struct drm_device *dev)
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = i915_gem_init_userptr(dev);
- if (ret)
- goto out_unlock;
-
+ i915_gem_init_userptr(dev_priv);
i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
@@ -5042,14 +4967,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
for_each_engine(engine, dev_priv)
dev_priv->gt.cleanup_engine(engine);
-
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev, ALL_ENGINES);
}
static void
@@ -5073,7 +4990,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
else
dev_priv->num_fence_regs = 8;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
@@ -5148,6 +5065,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->objects);
}
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ /* Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ */
+
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5254,13 +5199,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5228,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
+ if (vma->is_ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5310,23 +5250,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
return false;
}
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- BUG_ON(list_empty(&o->vma_list));
+ GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size;
}
+
return 0;
}
@@ -5365,8 +5300,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
- if (IS_ERR_OR_NULL(obj))
+ obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
return obj;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) {
int ret;
- obj = i915_gem_alloc_object(pool->dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ obj = i915_gem_object_create(pool->dev, size);
+ if (IS_ERR(obj))
+ return obj;
ret = i915_gem_object_get_pages(obj);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..a3b11aac23a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
+/* note: the outer parentheses keep the -1 bound to the shift when the
+ * macro is used inside a larger expression */
+
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ int i;
+ lockdep_assert_held(&ctx->i915->dev->struct_mutex);
trace_i915_context_free(ctx);
- if (i915.enable_execlists)
- intel_lr_context_free(ctx);
-
/*
* This context is going away and we need to remove all VMAs still
* around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
i915_ppgtt_put(ctx->ppgtt);
- if (ctx->legacy_hw_ctx.rcs_state)
- drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_context *ce = &ctx->engine[i];
+
+ if (!ce->state)
+ continue;
+
+ WARN_ON(ce->pin_count);
+ if (ce->ringbuf)
+ intel_ringbuffer_free(ce->ringbuf);
+
+ drm_gem_object_unreference(&ce->state->base);
+ }
+
list_del(&ctx->link);
+
+ ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
kfree(ctx);
}
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ lockdep_assert_held(&dev->struct_mutex);
+
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return obj;
/*
* Try to make the context utilize L3 as well as LLC.
@@ -209,18 +224,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+ int ret;
+
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0) {
+ /* Contexts are only released when no longer active.
+ * Flush any pending retires to hopefully release some
+ * stale contexts and try again.
+ */
+ i915_gem_retire_requests(dev_priv);
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ *out = ret;
+ return 0;
+}
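
assign_hw_id() treats IDA exhaustion as possibly transient: contexts are only released once idle, so a retire pass may free up IDs, and the allocation is retried exactly once. The retry-after-reclaim idiom as a self-contained sketch (the bitmap allocator below is a toy stand-in for ida_simple_get()):

#include <stdio.h>

#define MAX_ID 8
static unsigned char in_use[MAX_ID];

static int try_alloc(void)
{
	int i;

	for (i = 0; i < MAX_ID; i++)
		if (!in_use[i]) {
			in_use[i] = 1;
			return i;
		}
	return -1;		/* exhausted */
}

static void reclaim_stale(void)
{
	in_use[0] = 0;		/* stand-in for retiring stale holders */
}

/* The assign_hw_id() idiom: on failure, reclaim and retry once. */
static int alloc_with_reclaim(unsigned *out)
{
	int id = try_alloc();

	if (id < 0) {
		reclaim_stale();
		id = try_alloc();
		if (id < 0)
			return id;
	}
	*out = id;
	return 0;
}

int main(void)
{
	unsigned id;
	int i;

	for (i = 0; i < MAX_ID + 1; i++)
		if (alloc_with_reclaim(&id))
			return 1;
	printf("last id %u\n", id);
	return 0;
}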
+
+static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
+ ret = assign_hw_id(dev_priv, &ctx->hw_id);
+ if (ret) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
@@ -232,7 +275,7 @@ __create_hw_context(struct drm_device *dev,
ret = PTR_ERR(obj);
goto err_out;
}
- ctx->legacy_hw_ctx.rcs_state = obj;
+ ctx->engine[RCS].state = obj;
}
/* Default context will never have a file_priv */
@@ -249,7 +292,7 @@ __create_hw_context(struct drm_device *dev,
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
- ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+ ctx->remap_slice = ALL_L3_SLICES(dev_priv);
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
@@ -265,44 +308,27 @@ err_out:
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
- const bool is_global_default_ctx = file_priv == NULL;
- struct intel_context *ctx;
- int ret = 0;
+ struct i915_gem_context *ctx;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev->struct_mutex);
ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
- /* We may need to do things with the shrinker which
- * require us to immediately switch back to the default
- * context. This can cause a problem as pinning the
- * default context also requires GTT space which may not
- * be available. To avoid this we always pin the default
- * context.
- */
- ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
- get_context_alignment(dev), 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- goto err_destroy;
- }
- }
-
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
- if (IS_ERR_OR_NULL(ppgtt)) {
+ if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- ret = PTR_ERR(ppgtt);
- goto err_unpin;
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
+ i915_gem_context_unreference(ctx);
+ return ERR_CAST(ppgtt);
}
ctx->ppgtt = ppgtt;
@@ -311,24 +337,19 @@ i915_gem_create_context(struct drm_device *dev,
trace_i915_context_create(ctx);
return ctx;
-
-err_unpin:
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(ret);
}
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
- if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ if (ce->state)
+ i915_gem_object_ggtt_unpin(ce->state);
+
i915_gem_context_unreference(ctx);
}
}
@@ -336,51 +357,48 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+
+ lockdep_assert_held(&dev->struct_mutex);
if (i915.enable_execlists) {
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev_priv, ctx);
}
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
-
- if (engine->last_context) {
- i915_gem_context_unpin(engine->last_context, engine);
- engine->last_context = NULL;
- }
- }
-
- /* Force the GPU state to be reinitialised on enabling */
- dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+ i915_gem_context_lost(dev_priv);
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->kernel_context))
return 0;
- if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+ if (intel_vgpu_active(dev_priv) &&
+ HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
}
+ /* Using the simple ida interface, the max is limited by sizeof(int) */
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ ida_init(&dev_priv->context_hw_ida);
+
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
- } else if (HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ } else if (HAS_HW_CONTEXTS(dev_priv)) {
+ dev_priv->hw_context_size =
+ round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -395,6 +413,26 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ if (!i915.enable_execlists && ctx->engine[RCS].state) {
+ int ret;
+
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
+ get_context_alignment(dev_priv), 0);
+ if (ret) {
+ DRM_ERROR("Failed to pinned default global context (error %d)\n",
+ ret);
+ i915_gem_context_unreference(ctx);
+ return ret;
+ }
+ }
+
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -403,67 +441,48 @@ int i915_gem_context_init(struct drm_device *dev)
return 0;
}
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *dctx = dev_priv->kernel_context;
- int i;
+ struct intel_engine_cs *engine;
- if (dctx->legacy_hw_ctx.rcs_state) {
- /* The only known way to stop the gpu from accessing the hw context is
- * to reset it. Do this as the very last operation to avoid confusing
- * other code, leading to spurious errors. */
- intel_gpu_reset(dev, ALL_ENGINES);
-
- /* When default context is created and switched to, base object refcount
- * will be 2 (+1 from object creation and +1 from do_switch()).
- * i915_gem_context_fini() will be called after gpu_idle() has switched
- * to default context. So we need to unreference the base object once
- * to offset the do_switch part, so that i915_gem_context_unreference()
- * can then free the base object correctly. */
- WARN_ON(!dev_priv->engine[RCS].last_context);
-
- i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
- }
-
- for (i = I915_NUM_ENGINES; --i >= 0;) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ lockdep_assert_held(&dev_priv->dev->struct_mutex);
+ for_each_engine(engine, dev_priv) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
+
+ /* Force the GPU state to be reinitialised on enabling */
+ dev_priv->kernel_context->engine[engine->id].initialised =
+ engine->init_context == NULL;
}
- i915_gem_context_unreference(dctx);
- dev_priv->kernel_context = NULL;
+ /* Force the GPU state to be reinitialised on enabling */
+ dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
}
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
+void i915_gem_context_fini(struct drm_device *dev)
{
- struct intel_engine_cs *engine = req->engine;
- int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gem_context *dctx = dev_priv->kernel_context;
- if (i915.enable_execlists) {
- if (engine->init_context == NULL)
- return 0;
+ lockdep_assert_held(&dev->struct_mutex);
- ret = engine->init_context(req);
- } else
- ret = i915_switch_context(req);
+ if (!i915.enable_execlists && dctx->engine[RCS].state)
+ i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
- if (ret) {
- DRM_ERROR("ring init context: %d\n", ret);
- return ret;
- }
+ i915_gem_context_unreference(dctx);
+ dev_priv->kernel_context = NULL;
- return 0;
+ ida_destroy(&dev_priv->context_hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct intel_context *ctx = p;
+ struct i915_gem_context *ctx = p;
+ ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_unreference(ctx);
return 0;
}
@@ -471,7 +490,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr);
@@ -491,31 +510,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ lockdep_assert_held(&dev->struct_mutex);
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
}
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct intel_context *ctx;
-
- ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
- if (!ctx)
- return ERR_PTR(-ENOENT);
-
- return ctx;
-}
-
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(engine->dev) ?
- hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(dev_priv) ?
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0;
int len, ret;
@@ -524,21 +534,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(engine->dev)) {
+ if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+ if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(engine->dev)->gen < 8)
+ else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(engine->dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
@@ -546,14 +556,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -568,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_SET_CONTEXT);
intel_ring_emit(engine,
- i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +586,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
*/
intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -609,45 +619,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
}
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
- struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+ u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+ struct intel_engine_cs *engine = req->engine;
+ int i, ret;
+
+ if (!remap_info)
+ return 0;
+
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
+ }
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
+
+ return 0;
+}
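
Compared with the removed i915_gem_l3_remap(), remap_l3() batches all the L3 log writes under one MI_LOAD_REGISTER_IMM header instead of emitting one header per register, shrinking the emission from 3 dwords per register to 2 plus a fixed header and trailing MI_NOOP. A quick check of the dword budgets (this assumes GEN7_L3LOG_SIZE is 0x80 bytes, its i915_reg.h value at the time):

#include <assert.h>

#define GEN7_L3LOG_SIZE 0x80	/* bytes => 32 u32 registers (assumed) */

int main(void)
{
	int n = GEN7_L3LOG_SIZE / 4;

	/* removed i915_gem_l3_remap(): one MI_LOAD_REGISTER_IMM(1)
	 * per register, i.e. header + reg + value each time */
	int per_reg = n * 3;

	/* remap_l3(): one MI_LOAD_REGISTER_IMM(n) header, then n
	 * (reg, value) pairs, plus a trailing MI_NOOP */
	int batched = 1 + n * 2 + 1;

	assert(per_reg == 96);	/* matches the old ring_begin() budget */
	assert(batched == 66);	/* matches GEN7_L3LOG_SIZE/4 * 2 + 2 */
	return 0;
}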
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
if (to->remap_slice)
return false;
- if (!to->legacy_hw_ctx.initialized)
+ if (!to->engine[RCS].initialised)
return false;
- if (to->ppgtt &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
return to == engine->last_context;
}
static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
+ /* Always load the ppgtt on first use */
+ if (!engine->last_context)
+ return true;
+
+ /* Same context without new entries, skip */
if (engine->last_context == to &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
if (engine->id != RCS)
return true;
- if (INTEL_INFO(engine->dev)->gen < 8)
+ if (INTEL_GEN(engine->i915) < 8)
return true;
return false;
}
static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+ struct i915_gem_context *to,
+ u32 hw_flags)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
if (!IS_GEN8(to->i915))
@@ -661,18 +709,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
- struct intel_context *to = req->ctx;
+ struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
- struct intel_context *from;
+ struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+ struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
- if (skip_rcs_switch(engine, to))
+ if (skip_rcs_switch(ppgtt, engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
- ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(engine->dev),
+ ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
+ get_context_alignment(engine->i915),
0);
if (ret)
return ret;
@@ -694,37 +743,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
if (ret)
goto unpin_out;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
hw_flags = MI_RESTORE_INHIBIT;
- else if (to->ppgtt &&
- intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+ else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE;
else
hw_flags = 0;
- /* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(engine, to) &&
- needs_pd_load_post(to, hw_flags));
-
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
@@ -738,8 +782,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+ from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -747,10 +791,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->legacy_hw_ctx.rcs_state->dirty = 1;
+ from->engine[RCS].state->dirty = 1;
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(from->engine[RCS].state);
i915_gem_context_unreference(from);
}
i915_gem_context_reference(to);
@@ -759,9 +803,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
- if (needs_pd_load_post(to, hw_flags)) {
+ if (needs_pd_load_post(ppgtt, to, hw_flags)) {
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
@@ -771,33 +815,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret;
}
- if (to->ppgtt)
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ if (ppgtt)
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
- ret = i915_gem_l3_remap(req, i);
+ ret = remap_l3(req, i);
if (ret)
return ret;
to->remap_slice &= ~(1<<i);
}
- if (!to->legacy_hw_ctx.initialized) {
+ if (!to->engine[RCS].initialised) {
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
return ret;
}
- to->legacy_hw_ctx.initialized = true;
+ to->engine[RCS].initialised = true;
}
return 0;
unpin_out:
- i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(to->engine[RCS].state);
return ret;
}
@@ -817,25 +861,24 @@ unpin_out:
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ lockdep_assert_held(&req->i915->dev->struct_mutex);
- if (engine->id != RCS ||
- req->ctx->legacy_hw_ctx.rcs_state == NULL) {
- struct intel_context *to = req->ctx;
+ if (!req->ctx->engine[engine->id].state) {
+ struct i915_gem_context *to = req->ctx;
+ struct i915_hw_ppgtt *ppgtt =
+ to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
int ret;
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
return ret;
- /* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (to != engine->last_context) {
@@ -861,7 +904,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!contexts_enabled(dev))
@@ -890,7 +933,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (args->pad != 0)
@@ -903,13 +946,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
@@ -922,14 +965,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -965,14 +1008,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -1004,3 +1047,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+ hs = &ctx->hang_stats;
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
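
From user space the handler above is reached through DRM_IOCTL_I915_GET_RESET_STATS. A minimal caller is sketched below; note that, per the code above, querying the default context (ctx_id 0) without CAP_SYS_ADMIN fails with -EPERM, reset_count reads back 0 for unprivileged callers, and the device path here is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_reset_stats stats;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = 0;	/* DEFAULT_CONTEXT_HANDLE: needs CAP_SYS_ADMIN */

	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
		printf("resets=%u active=%u pending=%u\n",
		       stats.reset_count, stats.batch_active,
		       stats.batch_pending);
	return 0;
}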
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..b144c3f5c650 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -154,7 +154,7 @@ none:
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(to_i915(dev));
goto search_again;
}
@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
if (ret)
return ret;
- i915_gem_retire_requests(vm->dev);
+ i915_gem_retire_requests(to_i915(vm->dev));
WARN_ON(!list_empty(&vm->active_list));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..8097698b9622 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry;
i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (flush_chipset)
- i915_gem_chipset_flush(req->engine->dev);
+ i915_gem_chipset_flush(req->engine->i915);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
return 0;
}
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
- struct intel_context *ctx = NULL;
+ struct i915_gem_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
- ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
- if (i915.enable_execlists && !ctx->engine[engine->id].state) {
- int ret = intel_lr_context_deferred_alloc(ctx, engine);
- if (ret) {
- DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
- return ERR_PTR(ret);
- }
- }
-
return ctx;
}
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..2b6bdc267fb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int i;
if (obj->bit_17 == NULL)
return;
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
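
[Editor's note: the swizzle fixups above are part of a series-wide conversion from sg_page_iter to the new sgt_iter helpers. A minimal sketch of the two iteration idioms, assuming the for_each_sgt_page()/for_each_sgt_dma() macros introduced elsewhere in this series; walk_object_pages() is an illustrative name, not a function in this patch:

/* Sketch only: iterate an object's backing store with sgt_iter. */
static void walk_object_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	dma_addr_t addr;

	/* Page-by-page walk; the iterator hides sg chaining and nents. */
	for_each_sgt_page(page, sgt_iter, obj->pages)
		prefetch(page);

	/* The same table walked by DMA address, one PAGE_SIZE at a time. */
	for_each_sgt_dma(addr, sgt_iter, obj->pages)
		(void)addr;
}
]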
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..46684779d4d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
*
*/
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED,
};
-static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt)
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
- has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+ has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+ has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+ has_full_48bit_ppgtt =
+ IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
has_full_ppgtt = false; /* emulation is too hard */
+ if (!has_aliasing_ppgtt)
+ return 0;
+
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
- (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+ if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
#endif
/* Early VLV doesn't have this */
- if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
- if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
static int gen8_init_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
+ int ret;
vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pt);
+ ret = PTR_ERR(vm->scratch_pt);
+ goto free_scratch_page;
}
vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) {
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pd);
+ ret = PTR_ERR(vm->scratch_pd);
+ goto free_pt;
}
if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) {
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pdp);
+ ret = PTR_ERR(vm->scratch_pdp);
+ goto free_pd;
}
}
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
+
+free_pd:
+ free_pd(dev, vm->scratch_pd);
+free_pt:
+ free_pt(dev, vm->scratch_pt);
+free_scratch_page:
+ free_scratch_page(dev, vm->scratch_page);
+
+ return ret;
}
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(vm->dev))
+ if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(ppgtt->base.dev)) {
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(ppgtt->base.dev))
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
- struct sg_page_iter sg_iter;
+ gen6_pte_t *pt_vaddr = NULL;
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
- pt_vaddr = NULL;
- for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ for_each_sgt_dma(addr, sgt_iter, pages) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, true, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
act_pte = 0;
}
}
+
if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}
@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else
BUG();
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
ppgtt->switch_mm = vgpu_mm_switch;
ret = gen6_ppgtt_alloc(ppgtt);
@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
return 0;
}
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- if (i915.enable_execlists)
- return 0;
-
- if (!ppgtt)
- return 0;
-
- return ppgtt->switch_mm(ppgtt, req);
-}
-
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
-void i915_check_and_clear_faults(struct drm_device *dev)
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
for_each_engine(engine, dev_priv) {
@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
@@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0; /* shut up gcc */
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ gen8_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- gen8_set_pte(&gtt_entries[i],
- gen8_pte_encode(addr, level, true));
- i++;
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = gen8_pte_encode(addr, level, true);
+ gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
/*
@@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readq(&gtt_entries[i-1])
- != gen8_pte_encode(addr, level, true));
+ WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen6_pte_t __iomem *gtt_entries;
+ gen6_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
- i++;
+ gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = vm->pte_encode(addr, level, true, flags);
+ iowrite32(gtt_entry, &gtt_entries[i++]);
}
/* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
- if (i != 0) {
- unsigned long gtt = readl(&gtt_entries[i-1]);
- WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
- }
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
+static void nop_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+}
+
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE;
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ret = intel_vgt_balloon(dev);
if (ret)
return ret;
@@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) {
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
intel_vgt_deballoon();
drm_mm_takedown(&ggtt->base.mm);
@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
- ggtt->base.clear_range = gen8_ggtt_clear_range;
- if (IS_CHERRYVIEW(dev_priv))
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
- else
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.clear_range = nop_clear_range;
+ if (!USES_FULL_PPGTT(dev_priv))
+ ggtt->base.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ if (IS_CHERRYVIEW(dev_priv))
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+
return ret;
}
@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
#endif
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0;
@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- bool flush;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* Cache flush objects bound into GGTT and rebind them. */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE));
-
- flush = true;
}
- if (flush)
- i915_gem_clflush_object(obj, obj->pin_display);
+ if (obj->pin_display)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3394,11 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
+ const size_t n_pages = obj->base.size / PAGE_SIZE;
unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ dma_addr_t dma_addr;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
@@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+ page_addr_list = drm_malloc_gfp(n_pages,
sizeof(dma_addr_t),
GFP_TEMPORARY);
if (!page_addr_list)
@@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
- i++;
- }
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ page_addr_list[i++] = dma_addr;
+ GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
return obj->base.size;
}
}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!vma->obj->map_and_fenceable))
+ return ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!vma->is_ggtt);
+ GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ vma->pin_count++;
+ return ptr;
+}
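
[Editor's note: gen8_init_scratch() above is also reworked from repeated inline frees to the conventional goto-unwind ladder. A minimal sketch of that idiom; setup_three(), struct ctx, alloc_a()/alloc_b() and the free_a label are hypothetical names, not part of this patch:

/* Sketch: each failure jumps to a label that frees, in reverse order,
 * only what was successfully allocated before it. */
static int setup_three(struct ctx *c)
{
	int ret;

	c->a = alloc_a();
	if (IS_ERR(c->a))
		return PTR_ERR(c->a);

	c->b = alloc_b();
	if (IS_ERR(c->b)) {
		ret = PTR_ERR(c->b);
		goto free_a;
	}

	return 0;

free_a:
	free_a(c->a);
	return ret;
}
]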
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..62be77cac5cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
+#include <linux/io-mapping.h>
+
struct drm_i915_file_private;
typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ void __iomem *iomap;
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
@@ -560,4 +561,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
+/**
+ * i915_vma_pin_iomap - calls io_mapping_map_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed-in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->pin_count == 0);
+ GEM_BUG_ON(vma->iomap == NULL);
+ vma->pin_count--;
+}
+
#endif
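
[Editor's note: for reference, a minimal sketch of how a caller uses the pin_iomap API per the kerneldoc above; poke_vma() is an illustrative name and error handling is pared to the minimum:

/* Sketch only: caller already holds struct_mutex and the VMA is pinned
 * in the mappable region of the GGTT. */
static int poke_vma(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);	/* takes an extra pin */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr);		/* write through the GGTT aperture */

	/* Drops only the pin; the WC mapping stays cached in vma->iomap
	 * and is torn down when the VMA is unbound. */
	i915_vma_unpin_iomap(vma);
	return 0;
}
]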
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..7c93327b70fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
#include "intel_renderstate.h"
static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
{
switch (gen) {
case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+ struct drm_i915_private *dev_priv)
{
int ret;
- so->gen = INTEL_INFO(dev)->gen;
- so->rodata = render_state_get_rodata(dev, so->gen);
+ so->gen = INTEL_GEN(dev_priv);
+ so->rodata = render_state_get_rodata(so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL)
- return -ENOMEM;
+ so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+ if (IS_ERR(so->obj))
+ return PTR_ERR(so->obj);
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, engine->dev);
+ ret = render_state_init(so, engine->i915);
if (ret)
return ret;
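
[Editor's note: the allocator change above swaps a NULL-returning i915_gem_alloc_object() for an ERR_PTR-returning i915_gem_object_create(). A minimal sketch of the new calling convention; make_scratch() is an illustrative name:

/* Sketch: consume the ERR_PTR-returning allocator. */
static int make_scratch(struct drm_device *dev,
			struct drm_i915_gem_object **out)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate the encoded errno */

	*out = obj;
	return 0;
}
]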
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..538c30499848 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long count = 0;
trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
+
+ /*
+ * Unbinding of objects will require HW access; let us not wake the
+ * device just to recover a little memory. If absolutely necessary,
+ * we will force the wake during oom-notifier.
+ */
+ if ((flags & I915_SHRINK_BOUND) &&
+ !intel_runtime_pm_get_if_in_use(dev_priv))
+ flags &= ~I915_SHRINK_BOUND;
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
list_splice(&still_in_list, phase->list);
}
- i915_gem_retire_requests(dev_priv->dev);
+ if (flags & I915_SHRINK_BOUND)
+ intel_runtime_pm_put(dev_priv);
+
+ i915_gem_retire_requests(dev_priv);
return count;
}
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
+ intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
+ intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
- unsigned long freed_pages;
+ struct i915_vma *vma, *next;
+ unsigned long freed_pages = 0;
+ int ret;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
- freed_pages = i915_gem_shrink(dev_priv, -1UL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
+ /* Force everything onto the inactive lists */
+ ret = i915_gpu_idle(dev_priv->dev);
+ if (ret)
+ goto out;
+
+ intel_runtime_pm_get(dev_priv);
+ freed_pages += i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+ intel_runtime_pm_put(dev_priv);
+
+ /* We also want to clear any cached iomaps as they wrap vmap */
+ list_for_each_entry_safe(vma, next,
+ &dev_priv->ggtt.base.inactive_list, vm_link) {
+ unsigned long count = vma->node.size >> PAGE_SHIFT;
+ if (vma->iomap && i915_vma_unbind(vma) == 0)
+ freed_pages += count;
+ }
+out:
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
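
[Editor's note: the shrinker now takes a runtime-PM reference only when it will actually touch bound objects. A minimal sketch of the conditional-wakeref pattern used above; try_shrink() is illustrative, while the intel_runtime_pm_* helpers and the I915_SHRINK_BOUND flag are as in this patch:

/* Sketch: shrink bound objects only if the device is already awake;
 * the oom notifier forces a wakeref instead. */
static unsigned long try_shrink(struct drm_i915_private *dev_priv,
				unsigned int flags)
{
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;	/* skip HW-touching work */

	/* ... unbind/purge according to the remaining flags ... */

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);	/* balance the conditional get */

	return 0;
}
]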
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..f9253f2b7ba0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
/* See the comment at the drm_mm_init() call for more about this check.
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ if (IS_GEN8(dev_priv) && start < 4096)
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm;
- pci_read_config_dword(dev->pdev, BSM, &bsm);
+ pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
- base = bsm & BSM_MASK;
+ base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..a6eb5c47a49c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (IS_GEN3(obj->base.dev)) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode))
- ret = i915_gem_object_ggtt_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
if (ret == 0) {
if (obj->pages &&
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
- return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..34ff2459ceea 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code;
}
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
- } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+ } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
}
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!i915_semaphore_is_enabled(dev_priv->dev))
+ if (!i915_semaphore_is_enabled(dev_priv))
return;
if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
}
}
-static void i915_record_ring_state(struct drm_device *dev,
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine,
ering);
else
gen6_record_semaphore_state(dev_priv, engine, ering);
}
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine);
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
default:
case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action;
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_INFO(dev)->gen >= 8)
+ else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
}
}
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].pid = -1;
- if (engine->dev == NULL)
+ if (!intel_engine_initialized(engine))
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, engine, &error->ring[i]);
+ i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(engine);
if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
u32 engine_mask,
const char *error_msg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x",
- INTEL_INFO(dev)->gen, ring_id, ecode);
+ INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg)
{
static bool warned;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
+ i915_gem_record_fences(dev_priv, error);
+ i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
+ error->overlay = intel_overlay_capture_error_state(dev_priv);
+ error->display = intel_display_capture_error_state(dev_priv);
- i915_error_capture_msg(dev, error, engine_mask, error_msg);
+ i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
warned = true;
}
}
@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+ uint32_t *instdone)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- if (IS_GEN2(dev) || IS_GEN3(dev))
+ if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
-#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
-
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
+#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
+#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..ac72451c571c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6(dev) ||
- NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -361,10 +360,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
- struct intel_context *ctx = client->owner;
+ struct i915_gem_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
- enum intel_engine_id id;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -374,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
- uint64_t ctx_desc;
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -386,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- obj = ctx->engine[id].state;
- if (!obj)
+ if (!ce->state)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, engine);
- lrc->context_desc = (u32)ctx_desc;
+ lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ctx->engine[id].ringbuf->obj;
+ obj = ce->ringbuf->obj;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_begin = gfx_addr;
@@ -427,7 +423,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.wq_size = client->wq_size;
/*
- * XXX: Take LRCs from an existing intel_context if this is not an
+ * XXX: Take LRCs from an existing context if this is not an
* IsKMDCreatedContext client
*/
desc.desc_private = (uintptr_t)client;
@@ -451,47 +447,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
-int i915_guc_wq_check_space(struct i915_guc_client *gc)
+/**
+ * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * @request: request associated with the commands
+ *
+ * Return: 0 if space is available
+ * -EAGAIN if space is not currently available
+ *
+ * This function must be called (and must return 0) before a request
+ * is submitted to the GuC via i915_guc_submit() below. Once a result
+ * of 0 has been returned, it remains valid until (but only until)
+ * the next call to submit().
+ *
+ * This precheck allows the caller to determine in advance that space
+ * will be available for the next submission before committing resources
+ * to it, and helps avoid late failures with complicated recovery paths.
+ */
+int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
{
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ struct i915_guc_client *gc = request->i915->guc.execbuf_client;
struct guc_process_desc *desc;
- u32 size = sizeof(struct guc_wq_item);
- int ret = -ETIMEDOUT, timeout_counter = 200;
+ u32 freespace;
- if (!gc)
- return 0;
+ GEM_BUG_ON(gc == NULL);
desc = gc->client_base + gc->proc_desc_offset;
- while (timeout_counter-- > 0) {
- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- ret = 0;
- break;
- }
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ if (likely(freespace >= wqi_size))
+ return 0;
- if (timeout_counter)
- usleep_range(1000, 2000);
- };
+ gc->no_wq_space += 1;
- return ret;
+ return -EAGAIN;
}
-static int guc_add_workqueue_item(struct i915_guc_client *gc,
- struct drm_i915_gem_request *rq)
+static void guc_add_workqueue_item(struct i915_guc_client *gc,
+ struct drm_i915_gem_request *rq)
{
+ /* wqi_len is in DWords, and does not include the one-word header */
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
- u32 tail, wq_len, wq_off, space;
+ u32 freespace, tail, wq_off, wq_page;
desc = gc->client_base + gc->proc_desc_offset;
- space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- if (WARN_ON(space < sizeof(struct guc_wq_item)))
- return -ENOSPC; /* shouldn't happen */
- /* postincrement WQ tail for next time */
- wq_off = gc->wq_tail;
- gc->wq_tail += sizeof(struct guc_wq_item);
- gc->wq_tail &= gc->wq_size - 1;
+ /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ GEM_BUG_ON(freespace < wqi_size);
+
+ /* The GuC firmware wants the tail index in QWords, not bytes */
+ tail = rq->tail;
+ GEM_BUG_ON(tail & 7);
+ tail >>= 3;
+ GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -500,19 +513,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
* XXX: if not the case, we need to save data to a temp wqi and copy it to
* workqueue buffer dw by dw.
*/
- WARN_ON(sizeof(struct guc_wq_item) != 16);
- WARN_ON(wq_off & 3);
+ BUILD_BUG_ON(wqi_size != 16);
- /* wq starts from the page after doorbell / process_desc */
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
- (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+ /* postincrement WQ tail for next time */
+ wq_off = gc->wq_tail;
+ gc->wq_tail += wqi_size;
+ gc->wq_tail &= gc->wq_size - 1;
+ GEM_BUG_ON(wq_off & (wqi_size - 1));
+
+ /* WQ starts from the page after doorbell / process_desc */
+ wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
wq_off &= PAGE_SIZE - 1;
+ base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
wqi = (struct guc_wq_item *)((char *)base + wq_off);
- /* len does not include the header */
- wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+ /* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
- (wq_len << WQ_LEN_SHIFT) |
+ (wqi_len << WQ_LEN_SHIFT) |
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
@@ -520,48 +537,50 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
rq->engine);
- /* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->ringbuf->tail >> 3;
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = 0; /*XXX: what fence to be here */
+ wqi->fence_id = rq->seqno;
kunmap_atomic(base);
-
- return 0;
}
/**
* i915_guc_submit() - Submit commands through GuC
- * @client: the guc client where commands will go through
* @rq: request associated with the commands
*
- * Return: 0 if succeed
+ * Return: 0 on success, otherwise an errno.
+ * (Note: nonzero really shouldn't happen!)
+ *
+ * The caller must have already called i915_guc_wq_check_space() above
+ * with a result of 0 (success) since the last request submission. This
+ * guarantees that there is space in the work queue for the new request,
+ * so enqueuing the item cannot fail.
+ *
+ * Bad Things Will Happen if the caller violates this protocol, e.g. calls
+ * submit() when check() says there's no space, or calls submit() multiple
+ * times with no intervening check().
+ *
+ * The only error here arises if the doorbell hardware isn't functioning
+ * as expected, which really shouldn't happen.
*/
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq)
+int i915_guc_submit(struct drm_i915_gem_request *rq)
{
- struct intel_guc *guc = client->guc;
unsigned int engine_id = rq->engine->guc_id;
- int q_ret, b_ret;
+ struct intel_guc *guc = &rq->i915->guc;
+ struct i915_guc_client *client = guc->execbuf_client;
+ int b_ret;
- q_ret = guc_add_workqueue_item(client, rq);
- if (q_ret == 0)
- b_ret = guc_ring_doorbell(client);
+ guc_add_workqueue_item(client, rq);
+ b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
- if (q_ret) {
- client->q_fail += 1;
- client->retcode = q_ret;
- } else if (b_ret) {
+ client->retcode = b_ret;
+ if (b_ret)
client->b_fail += 1;
- client->retcode = q_ret = b_ret;
- } else {
- client->retcode = 0;
- }
+
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->seqno;
- return q_ret;
+ return b_ret;
}
/*
@@ -587,8 +606,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- obj = i915_gem_alloc_object(dev, size);
- if (!obj)
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
return NULL;
if (i915_gem_object_get_pages(obj)) {
@@ -678,7 +697,7 @@ static void guc_client_free(struct drm_device *dev,
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -916,11 +935,12 @@ int i915_guc_submission_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
- client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+ client = guc_client_alloc(dev,
+ GUC_CTX_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
if (!client) {
DRM_ERROR("Failed to create execbuf guc_client\n");
return -ENOMEM;
@@ -967,10 +987,10 @@ int intel_guc_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
@@ -993,10 +1013,10 @@ int intel_guc_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
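
[Editor's note: the submission rework above splits space reservation from submission, so the enqueue itself can no longer fail for lack of work-queue space. A minimal sketch of the resulting calling protocol, assuming the signatures introduced in this patch; guc_submit_request() is an illustrative wrapper, not a function added here:

/* Sketch: a 0 from check_space() licenses exactly one submit(). */
static int guc_submit_request(struct drm_i915_gem_request *rq)
{
	int ret;

	ret = i915_guc_wq_check_space(rq);	/* -EAGAIN if the WQ is full */
	if (ret)
		return ret;			/* caller backs off and retries */

	/* ... emit commands and advance the ring tail ... */

	return i915_guc_submit(rq);		/* WQ space is guaranteed */
}
]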
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..5c7378374ae6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
__gen6_disable_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
@@ -367,25 +364,11 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
- /*
- * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
- mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_INFO(dev_priv)->gen >= 8)
- mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return mask;
+ return (mask & ~dev_priv->rps.pm_intr_keep);
}
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -402,7 +385,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev->irq);
+ synchronize_irq(dev_priv->dev->irq);
}
/**
@@ -607,17 +590,15 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+ if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
@@ -750,7 +731,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +748,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
- if (HAS_DDI(dev) && !position) {
+ if (HAS_DDI(dev_priv) && !position) {
int i, temp;
for (i = 0; i < 100; i++) {
@@ -835,7 +816,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -897,7 +878,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
*vpos = position;
*hpos = 0;
} else {
@@ -955,9 +936,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
&crtc->hwmode);
}
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
@@ -986,7 +966,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
new_delay = dev_priv->ips.min_delay;
}
- if (ironlake_set_drps(dev, new_delay))
+ if (ironlake_set_drps(dev_priv, new_delay))
dev_priv->ips.cur_delay = new_delay;
spin_unlock(&mchdev_lock);
@@ -1175,7 +1155,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
- intel_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
out:
@@ -1506,27 +1486,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
}
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
int head, tail;
@@ -1550,7 +1526,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
entry = &pipe_crc->entries[head];
- entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+ pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
@@ -1566,27 +1543,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4) {}
#endif
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1570,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t res1, res2;
- if (INTEL_INFO(dev)->gen >= 3)
+ if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_RED(pipe)),
I915_READ(PIPE_CRC_RES_GREEN(pipe)),
I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1643,18 +1619,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- if (!drm_handle_vblank(dev, pipe))
- return false;
+ bool ret;
- return true;
+ ret = drm_handle_vblank(dev_priv->dev, pipe);
+ if (ret)
+ intel_finish_page_flip_mmio(dev_priv, pipe);
+
+ return ret;
}
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
- u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1689,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
spin_unlock(&dev_priv->irq_lock);
}
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
- if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1723,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
return hotplug_status;
}
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) {
@@ -1760,11 +1737,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
hotplug_trigger, hpd_status_g4x,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
@@ -1772,7 +1749,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
}
}
@@ -1831,7 +1808,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1827,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(dev_priv, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1911,7 +1888,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1904,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(dev_priv, gt_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1914,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
return ret;
}
-static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
/*
@@ -1966,16 +1943,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1961,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1994,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
-static void ivb_err_int_handler(struct drm_device *dev)
+static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
enum pipe pipe;
@@ -2032,19 +2007,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev))
- ivb_pipe_crc_irq_handler(dev, pipe);
+ if (IS_IVYBRIDGE(dev_priv))
+ ivb_pipe_crc_irq_handler(dev_priv, pipe);
else
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
}
}
I915_WRITE(GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_device *dev)
+static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 serr_int = I915_READ(SERR_INT);
if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2036,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
I915_WRITE(SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2051,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2069,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev);
+ cpt_serr_int_handler(dev_priv);
}
-static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2102,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pin_mask)
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
-static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2121,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = I915_READ(SDEIIR);
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev_priv))
+ cpt_irq_handler(dev_priv, pch_iir);
else
- ibx_irq_handler(dev, pch_iir);
+ ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev);
+ if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev_priv);
}
-static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
+ ivb_err_int_handler(dev_priv);
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
- cpt_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -2277,7 +2245,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
@@ -2289,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (gt_iir) {
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
snb_gt_irq_handler(dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2267,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (de_iir) {
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 7)
- ivb_display_irq_handler(dev, de_iir);
+ if (INTEL_GEN(dev_priv) >= 7)
+ ivb_display_irq_handler(dev_priv, de_iir);
else
- ilk_display_irq_handler(dev, de_iir);
+ ilk_display_irq_handler(dev_priv, de_iir);
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2284,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
@@ -2327,10 +2295,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
-static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2308,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
- struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
u32 iir;
enum pipe pipe;
@@ -2357,7 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (iir & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
@@ -2381,26 +2348,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_D;
if (iir & tmp_mask) {
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
found = true;
}
if (IS_BROXTON(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
- bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+ bxt_hpd_irq_handler(dev_priv, tmp_mask,
+ hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
- ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+ ilk_hpd_irq_handler(dev_priv,
+ tmp_mask, hpd_bdw);
found = true;
}
}
- if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
- gmbus_irq_handler(dev);
+ if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2427,8 +2396,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2405,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
else
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
- if (flip_done) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (flip_done)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2426,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors);
}
- if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2439,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv))
- spt_irq_handler(dev, iir);
+ spt_irq_handler(dev_priv, iir);
else
- cpt_irq_handler(dev, iir);
+ cpt_irq_handler(dev_priv, iir);
} else {
/*
* Like on previous PCH there seems to be something
@@ -2555,15 +2522,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
/*
* Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2544,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
- reset_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/*
* In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2555,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
intel_runtime_pm_get(dev_priv);
- intel_prepare_reset(dev);
+ intel_prepare_reset(dev_priv);
/*
* All state reset _must_ be completed before we update the
@@ -2597,14 +2563,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
- ret = i915_reset(dev);
+ ret = i915_reset(dev_priv);
- intel_finish_reset(dev);
+ intel_finish_reset(dev_priv);
intel_runtime_pm_put(dev_priv);
if (ret == 0)
- kobject_uevent_env(&dev->primary->kdev->kobj,
+ kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
/*
@@ -2615,9 +2581,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
}
}
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
@@ -2627,9 +2592,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err("render error detected, EIR: 0x%08x\n", eir);
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -2651,7 +2616,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
@@ -2673,7 +2638,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2717,10 +2682,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
@@ -2728,8 +2693,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, engine_mask, error_msg);
- i915_report_and_clear_eir(dev);
+ i915_capture_error_state(dev_priv, engine_mask, error_msg);
+ i915_report_and_clear_eir(dev_priv);
if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2751,7 +2716,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
i915_error_wake_up(dev_priv, false);
}
- i915_reset_and_wakeup(dev);
+ i915_reset_and_wakeup(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
@@ -2869,9 +2834,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
}
static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
{
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2849,10 @@ static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
- if (INTEL_INFO(dev_priv)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;
@@ -2916,7 +2881,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2942,7 +2907,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+ if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
return NULL;
/*
@@ -2954,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
* ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
/*
@@ -2976,7 +2941,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2951,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
@@ -3028,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
if (engine->id != RCS)
return true;
- i915_get_extra_instdone(engine->dev, instdone);
+ i915_get_extra_instdone(engine->i915, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3034,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
enum intel_ring_hangcheck_action ha;
u32 tmp;
@@ -3078,7 +3042,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != HANGCHECK_HUNG)
return ha;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3052,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
- if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3079,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
static unsigned kick_waiters(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = to_i915(engine->dev);
+ struct drm_i915_private *i915 = engine->i915;
unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3144,7 +3108,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
- struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int busy_count = 0, rings_hung = 0;
@@ -3272,22 +3235,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
}
if (rings_hung) {
- i915_handle_error(dev, rings_hung, "Engine(s) hung");
+ i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
goto out;
}
if (busy_count)
/* Reset timer case chip hangs without another request
* being added */
- i915_queue_hangcheck(dev);
+ i915_queue_hangcheck(dev_priv);
out:
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
-void i915_queue_hangcheck(struct drm_device *dev)
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
- struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+ struct i915_gpu_error *e = &dev_priv->gpu_error;
if (!i915.enable_hangcheck)
return;
@@ -3500,31 +3463,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(dev, encoder)
+ for_each_intel_encoder(dev_priv->dev, encoder)
if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static void ibx_hpd_irq_setup(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
}
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3504,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_device *dev)
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3569,24 +3529,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
-static void ilk_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
@@ -3601,15 +3560,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
- ibx_hpd_irq_setup(dev);
+ ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3827,6 +3785,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
+ u32 de_misc_masked = GEN8_DE_MISC_GSE;
enum pipe pipe;
if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,6 +3821,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4006,13 +3966,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i8xx_handle_vblank(struct drm_device *dev,
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4027,12 +3986,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
@@ -4089,15 +4047,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, plane, pipe, iir))
+ i8xx_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4182,7 +4140,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4197,13 +4155,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i915_handle_vblank(struct drm_device *dev,
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4218,12 +4175,11 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
@@ -4273,11 +4229,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
break;
/* Consume port. Then clear IIR or we'll miss events */
- if (I915_HAS_HOTPLUG(dev) &&
+ if (I915_HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4244,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, plane, pipe, iir))
+ i915_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4263,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4391,7 +4347,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask |= I915_USER_INTERRUPT;
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
/* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4362,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
*/
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
error_mask = ~(GM45_ERROR_PAGE_TABLE |
GM45_ERROR_MEM_PRIV |
GM45_ERROR_CP_PRIV |
@@ -4424,26 +4380,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
return 0;
}
-static void i915_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en;
assert_spin_locked(&dev_priv->irq_lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
- hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
@@ -4510,7 +4465,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4478,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, pipe, pipe, iir))
+ i915_handle_vblank(dev_priv, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4611,6 +4566,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ dev_priv->rps.pm_intr_keep = 0;
+
+ /*
+ * SNB,IVB are known to hard hang, and VLV,CHV may hard hang, on a
+ * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
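The pm_intr_keep mask introduced above collects RPS interrupt bits that must stay enabled no matter what mask a caller later requests. A minimal userspace sketch of that keep-mask idea follows; only GEN8_PMINTR_REDIRECT_TO_NON_DISP's value (1<<31) comes from the i915_reg.h hunk further down, while the EI_EXPIRED bit position and the apply_keep() helper are illustrative stand-ins, not the driver's actual register layout or code path.

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative only: the REDIRECT bit matches the i915_reg.h hunk below
 * (1 << 31); the EI_EXPIRED bit position here is a made-up stand-in. */
#define GEN6_PM_RP_UP_EI_EXPIRED         (1u << 2)
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1u << 31)

/* A keep-mask is ORed into every enable update, so the listed bits can
 * never be masked off regardless of what the caller asked for. */
static uint32_t apply_keep(uint32_t requested_enable, uint32_t keep)
{
        return requested_enable | keep;
}

int main(void)
{
        uint32_t keep = GEN6_PM_RP_UP_EI_EXPIRED;  /* e.g. SNB/IVB case */

        /* Even a "mask everything" request leaves the keep bit enabled. */
        assert(apply_keep(0, keep) & GEN6_PM_RP_UP_EI_EXPIRED);
        return 0;
}
```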
@@ -4674,12 +4643,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev_priv)->gen == 2) {
+ if (IS_GEN2(dev_priv)) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev_priv)->gen == 3) {
+ } else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
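Nearly every i915_irq.c hunk above follows one conversion: helpers that took a struct drm_device and immediately dereferenced dev->dev_private now take struct drm_i915_private directly, with INTEL_GEN()/IS_*() macros operating on dev_priv and dev_priv->dev used only for calls back into the DRM core. A compilable userspace sketch of the pattern, with the struct bodies reduced to stand-ins:

```c
#include <stdio.h>

/* Struct names mirror the driver; the bodies are reduced stand-ins. */
struct drm_i915_private;
struct drm_device {
        void *dev_private;         /* legacy back-pointer being phased out */
};
struct drm_i915_private {
        struct drm_device *dev;    /* kept for calls into the DRM core */
        int gen;                   /* stand-in for INTEL_INFO()->gen */
};

/* Old style: take drm_device, immediately fetch the private pointer. */
static void old_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        printf("gen %d\n", dev_priv->gen);
}

/* New style: take drm_i915_private directly; one less hop per call. */
static void new_handler(struct drm_i915_private *dev_priv)
{
        printf("gen %d\n", dev_priv->gen);
}

int main(void)
{
        struct drm_device drm;
        struct drm_i915_private i915 = { .dev = &drm, .gen = 8 };

        drm.dev_private = &i915;
        old_handler(&drm);
        new_handler(&i915);
        return 0;
}
```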
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..5e18cf9f754d 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,12 @@ struct i915_params i915 __read_mostly = {
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
- .enable_guc_submission = false,
+ .enable_guc_loading = 0,
+ .enable_guc_submission = 0,
.guc_log_level = -1,
.enable_dp_mst = true,
.inject_load_failure = 0,
+ .enable_dpcd_backlight = false,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +199,15 @@ MODULE_PARM_DESC(edp_vswing,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
-MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
+MODULE_PARM_DESC(enable_guc_loading,
+ "Enable GuC firmware loading "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
+MODULE_PARM_DESC(enable_guc_submission,
+ "Enable GuC submission "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +219,6 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+ "Enable support for DPCD backlight control (default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..1323261a0cdd 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
+ int enable_guc_loading;
+ int enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
@@ -57,10 +59,10 @@ struct i915_params {
bool load_detect_test;
bool reset;
bool disable_display;
- bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
+ bool enable_dpcd_backlight;
};
extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..dfb4c7a88de3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -886,7 +886,7 @@ enum skl_disp_power_wells {
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT).
+ * digital port D (CHV) or port A (BXT). ::
*
*
* Dual channel PHY (VLV/CHV/BXT)
@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
#define _FPA0 0x6040
#define _FPA1 0x6044
#define _FPB0 0x6048
@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN8_L3SQCREG1 _MMIO(0xB100)
-#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
@@ -7021,7 +7031,7 @@ enum skl_disp_power_wells {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
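Among the reordered CDCLK bits, the new CD2X pipe-select field encodes "no pipe" as pipe value 3. A quick check of the encoding, with the macros copied verbatim from the hunk:

```c
#include <assert.h>

#define BXT_CDCLK_CD2X_PIPE(pipe)   ((pipe) << 20)
#define BXT_CDCLK_CD2X_PIPE_NONE    BXT_CDCLK_CD2X_PIPE(3)

int main(void)
{
        assert(BXT_CDCLK_CD2X_PIPE(0) == 0x000000);   /* pipe A */
        assert(BXT_CDCLK_CD2X_PIPE_NONE == 0x300000); /* 3 << 20 */
        return 0;
}
```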
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..02507bfc8def 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
u64 units = 128ULL, div = 100000ULL;
u32 ret;
- if (!intel_enable_rc6(dev))
+ if (!intel_enable_rc6())
return 0;
intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_to_drm_minor(kdev);
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}
static ssize_t
@@ -204,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..6768db032f84 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
),
TP_fast_assign(
- __entry->dev = from->dev->primary->index;
+ __entry->dev = from->i915->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->flags = flags;
- i915_trace_irq_get(engine, req);
+ i915_trace_irq_get(req->engine, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = req->engine->dev->primary->index;
+ __entry->dev = req->i915->dev->primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine);
),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->dev->primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->blocking =
- mutex_is_locked(&engine->dev->struct_mutex);
+ mutex_is_locked(&req->i915->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
* the context.
*/
DECLARE_EVENT_CLASS(i915_context,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field(u32, dev)
- __field(struct intel_context *, ctx)
+ __field(struct i915_gem_context *, ctx)
__field(struct i915_address_space *, vm)
),
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
)
DEFINE_EVENT(i915_context, i915_context_create,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
DEFINE_EVENT(i915_context, i915_context_free,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
+ TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
- __field(struct intel_context *, to)
+ __field(struct i915_gem_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
__entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..004326291854 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -58,15 +58,14 @@
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic;
uint32_t version;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- if (!IS_HASWELL(dev))
+ if (!IS_HASWELL(dev_priv))
return;
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as
@@ -151,28 +150,28 @@ static int vgt_balloon_space(struct drm_mm *mm,
* of its graphic space being zero. Yet there are some portions ballooned out(
* the shadow part, which are marked as reserved by drm allocator). From the
* host point of view, the graphic address space is partitioned by multiple
- * vGPUs in different VMs.
+ * vGPUs in different VMs. ::
*
* vGPU1 view Host view
* 0 ------> +-----------+ +-----------+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* mappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * v |///////////| | Host |
+ * | |###########| | |
+ * v |###########| | Host |
* +=======+===========+ +===========+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* unmappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * | |///////////| | Host |
- * v |///////////| | |
+ * | |###########| | |
+ * | |###########| | Host |
+ * v |###########| | |
* total GM size ------> +-----------+ +-----------+
*
* Returns:
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..21ffcfea5f5d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -110,7 +110,7 @@ struct vgt_if {
#define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
-extern void i915_check_vgpu(struct drm_device *dev);
+extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void);
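The ballooning described in the i915_vgpu.c comment amounts to pre-reserving, in the guest's GTT allocator, the address ranges owned by the host and other vGPUs, so normal allocations never land there. A hedged kernel-style sketch of one balloon step, mirroring vgt_balloon_space() in spirit; balloon_range() is a hypothetical helper, and only drm_mm_reserve_node() is assumed to be the real reservation entry point:

```c
#include <drm/drm_mm.h>
#include <linux/string.h>

/* Mark [start, start + size) as reserved in the allocator; error
 * handling and bookkeeping trimmed for illustration. */
static int balloon_range(struct drm_mm *mm, struct drm_mm_node *node,
                         unsigned long start, unsigned long size)
{
        if (!size)
                return 0;

        memset(node, 0, sizeof(*node));
        node->start = start;
        node->size = size;

        /* Fails if any part of the range is already allocated. */
        return drm_mm_reserve_node(mm, node);
}
```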
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea721..c5a166752eda 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i];
+ plane = drm_state->planes[i].ptr;
/*
* to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
continue;
}
- plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+ plane_state = intel_atomic_get_existing_plane_state(drm_state,
+ intel_plane);
scaler_id = &plane_state->scaler_id;
}
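This intel_atomic.c hunk adapts to a DRM core change that merged the parallel planes[] and plane_states[] arrays of struct drm_atomic_state into a single array of pointer pairs, hence the new .ptr dereference. A stand-alone stub of the shape, assuming the core's pairing; the struct bodies are reduced stand-ins:

```c
#include <assert.h>

struct drm_plane { int index; };
struct drm_plane_state { int scaler_id; };

/* Assumed core layout: one array entry carries both pointers. */
struct __drm_planes_state {
        struct drm_plane *ptr;
        struct drm_plane_state *state;
};

int main(void)
{
        struct drm_plane plane = { .index = 0 };
        struct drm_plane_state state = { .scaler_id = -1 };
        struct __drm_planes_state planes[1] = {
                { .ptr = &plane, .state = &state },
        };

        /* Old code indexed two arrays; new code indexes one pair. */
        assert(planes[0].ptr == &plane);
        assert(planes[0].state->scaler_id == -1);
        return 0;
}
```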
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..b9329c2a670a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
- int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
- return ret;
+ return dev_priv->cdclk_freq;
}
static int i915_audio_component_sync_audio_rate(struct device *dev,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b235b6e88ead..713a02db378a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+ dvo_timing->himage_lo;
+ panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+ dvo_timing->vimage_lo;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
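The new width_mm/height_mm assignments recombine the VBT's split hi/lo image-size bytes into millimetres. A trivial check of the merge, with made-up byte values:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t himage_hi = 0x01, himage_lo = 0x2C;   /* hypothetical */
        int width_mm = (himage_hi << 8) | himage_lo;

        assert(width_mm == 300);                      /* 0x12C */
        return 0;
}
```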
@@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(dev_priv->dev);
+ ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
WARN_ON(ret > 0xf);
panel_type = ret;
@@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
}
+ dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ if (bdb->version >= 191 &&
+ get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+ const struct bdb_lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ dev_priv->vbt.backlight.type = method->type;
+ }
+
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return;
}
+	/*
+	 * These fields were introduced in VBT version 197, so make
+	 * sure they are zeroed out when parsing any earlier version.
+	 */
+ if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+ dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+ dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
if (bdb->version < 106) {
expected_size = 22;
- } else if (bdb->version < 109) {
+ } else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
+enum intel_backlight_type {
+ INTEL_BACKLIGHT_PMIC,
+ INTEL_BACKLIGHT_LPSS,
+ INTEL_BACKLIGHT_DISPLAY_DDI,
+ INTEL_BACKLIGHT_DSI_DCS,
+ INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
struct edp_power_seq {
u16 t1_t3;
u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
u16 dual_link:2;
u16 lane_cnt:2;
u16 pixel_overlap:3;
- u16 rsvd3:9;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 rsvd3:4;
u16 rsvd4;
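
With rsvd3 split up, dual-link DSI panels can route DCS-based CABC and backlight commands per port. A sketch of how a consumer might interpret the two new 2-bit fields (illustration only, not the driver's actual consumer):

    #include <stdio.h>
    #include <stdint.h>

    /* values from the #defines above */
    #define DL_DCS_PORT_A           0x00
    #define DL_DCS_PORT_C           0x01
    #define DL_DCS_PORT_A_AND_C     0x02

    int main(void)
    {
            uint16_t dl_dcs_backlight_ports = DL_DCS_PORT_A_AND_C; /* hypothetical VBT value */

            if (dl_dcs_backlight_ports != DL_DCS_PORT_C)
                    printf("send backlight DCS on port A\n");
            if (dl_dcs_backlight_ports != DL_DCS_PORT_A)
                    printf("send backlight DCS on port C\n");
            return 0;
    }
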
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..622968161ac7 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_DAC, "CRT");
intel_connector_attach_encoder(intel_connector, &crt->base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
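
Each platform now pairs its own DMC firmware path with its own minimum version. CSR_VERSION() is assumed here to pack major/minor into one integer so versions compare numerically, as in i915's CSR code; a standalone sketch of the check:

    #include <stdio.h>

    /* assumed packing, per i915's CSR_VERSION convention */
    #define CSR_VERSION(major, minor)       ((major) << 16 | (minor))
    #define CSR_VERSION_MAJOR(version)      ((version) >> 16)
    #define CSR_VERSION_MINOR(version)      ((version) & 0xffff)

    int main(void)
    {
            int required = CSR_VERSION(1, 23);      /* SKL_CSR_VERSION_REQUIRED */
            int found = CSR_VERSION(1, 7);          /* hypothetical firmware */

            if (found < required)
                    printf("refusing DMC %d.%d (need >= %d.%d)\n",
                           CSR_VERSION_MAJOR(found), CSR_VERSION_MINOR(found),
                           CSR_VERSION_MAJOR(required), CSR_VERSION_MINOR(required));
            return 0;
    }
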
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..022b41d422dc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
- intel_clock_t clock;
+ struct dpll clock;
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -2347,7 +2347,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
encoder = &intel_encoder->base;
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
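
drm_encoder_init() gained a printf-style name argument in this series, so encoders get readable names ("CRT", "DDI B", ...) in debug output instead of the old NULL. A quick demo of the "DDI %c" formatting, assuming i915's port_name() maps enum port 0, 1, 2, ... onto 'A', 'B', 'C', ...:

    #include <stdio.h>

    #define port_name(p) ((char)('A' + (p)))    /* assumed, per i915 convention */

    int main(void)
    {
            int port = 1;   /* hypothetical: port B */

            printf("DDI %c\n", port_name(port));    /* -> "DDI B" */
            return 0;
    }
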
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..60cba1956c0d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -48,6 +48,11 @@
#include <linux/reservation.h>
#include <linux/dma-buf.h>
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+ return work->mmio_work.func;
+}
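
is_mmio_work() relies on the fact that only MMIO flips initialize mmio_work, so a non-NULL work function distinguishes them from CS flips. A standalone sketch with stand-in types (illustration only):

    #include <stdio.h>
    #include <stddef.h>

    /* minimal stand-ins for the kernel types */
    struct work_struct { void (*func)(struct work_struct *); };
    struct intel_flip_work { struct work_struct mmio_work; };

    static void mmio_flip_fn(struct work_struct *w) { (void)w; }

    static int is_mmio_work(const struct intel_flip_work *work)
    {
            return work->mmio_work.func != NULL;
    }

    int main(void)
    {
            struct intel_flip_work cs = { { NULL } };
            struct intel_flip_work mmio = { { mmio_flip_fn } };

            printf("%d %d\n", is_mmio_work(&cs), is_mmio_work(&mmio)); /* 0 1 */
            return 0;
    }
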
+
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
DRM_FORMAT_C8,
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int broxton_calc_cdclk(int max_pixclk);
-typedef struct {
- int min, max;
-} intel_range_t;
-
-typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
};
/* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
}
}
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
return 270000;
}
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
.p2_slow = 4, .p2_fast = 2 },
};
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 4 },
};
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2_slow = 14, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
};
-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
},
};
-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
.dot = { .min = 22000, .max = 400000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
.dot = { .min = 20000, .max = 115000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = 80000, .max = 224000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100mhz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
.vco = { .min = 4800000, .max = 6700000 },
@@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
@@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot;
}
-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot / 5;
}
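
For reference, the gen2-4 style clock math these converted helpers implement, per the comments in this file: m = 5*(m1+2) + (m2+2), vco = refclk*m/(n+2), dot = vco/(p1*p2). A worked example, simplified (no zero-divider guards) and using a hypothetical divider choice inside the intel_limits_i9xx_sdvo ranges:

    #include <stdio.h>

    struct dpll { int n, m1, m2, p1, p2, m, p, vco, dot; };

    static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
    {
            clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
            clock->p = clock->p1 * clock->p2;
            clock->vco = refclk * clock->m / (clock->n + 2);
            clock->dot = clock->vco / clock->p;
            return clock->dot;
    }

    int main(void)
    {
            struct dpll c = { .n = 2, .m1 = 12, .m2 = 9, .p1 = 2, .p2 = 10 };

            i9xx_calc_dpll_params(96000, &c);
            printf("vco %d kHz, dot %d kHz\n", c.vco, c.dot); /* 1944000, 97200 */
            return 0;
    }
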
-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
*/
static bool intel_PLL_is_valid(struct drm_device *dev,
- const intel_limit_t *limit,
- const intel_clock_t *clock)
+ const struct intel_limit *limit,
+ const struct dpll *clock)
{
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid("n out of range\n");
@@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
@@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int max_n;
bool found = false;
/* approximately equals target * 0.00585 */
@@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
* best configuration and error found so far. Return the calculated error.
*/
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const intel_clock_t *calculated_clock,
- const intel_clock_t *best_clock,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
unsigned int best_error_ppm,
unsigned int *error_ppm)
{
@@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock;
+ struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
- intel_clock_t clock;
+ struct dpll clock;
uint64_t m2;
int found = false;
@@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
}
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock)
+ struct dpll *best_clock)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_bxt;
+ const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
@@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (INTEL_INFO(dev_priv)->gen == 5)
+ if (IS_GEN5(dev_priv))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -2309,7 +2313,7 @@ err_pm:
return ret;
}
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
@@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENODEV;
}
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum plane plane = intel_crtc->plane;
+ struct intel_crtc *crtc;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip_plane(dev, plane);
- }
+ for_each_intel_crtc(dev_priv->dev, crtc)
+ intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(dev_priv->dev);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(dev);
+ intel_display_suspend(dev_priv->dev);
}
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*/
- intel_complete_page_flips(dev);
+ intel_complete_page_flips(dev_priv);
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
* Flips in the rings have been nuked by the reset,
* so update the base address of all primary
@@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev)
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
- intel_update_primary_planes(dev);
+ intel_update_primary_planes(dev_priv->dev);
return;
}
@@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv->dev);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
+ intel_display_resume(dev_priv->dev);
intel_hpd_init(dev_priv);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(dev_priv->dev);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return false;
spin_lock_irq(&dev->event_lock);
- pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ pending = to_intel_crtc(crtc)->flip_work != NULL;
spin_unlock_irq(&dev->event_lock);
return pending;
@@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
- if (crtc->unpin_work)
+ if (crtc->flip_work)
intel_wait_for_vblank(dev, crtc->pipe);
return true;
@@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
+ struct intel_flip_work *work = intel_crtc->flip_work;
- /* ensure that the unpin work is consistent wrt ->pending. */
- smp_rmb();
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
if (work->event)
drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->work);
+ queue_work(dev_priv->wq, &work->unpin_work);
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
@@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (ret == 0) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ work = intel_crtc->flip_work;
+ if (work && !is_mmio_work(work)) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
@@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+ DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->base.name,
+ intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_crtc->pipe,
- drm_plane_index(&intel_plane->base));
+ DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ intel_crtc->pipe, drm_plane_index(&intel_plane->base));
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
- DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
- intel_plane->base.base.id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
break;
default:
- DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->pixel_format);
return -EINVAL;
}
@@ -5269,21 +5269,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
return max_cdclk_freq*90/100;
}
+static int skl_calc_cdclk(int max_pixclk, int vco);
+
static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ int max_cdclk, vco;
+
+ vco = dev_priv->skl_preferred_vco_freq;
+ WARN_ON(vco != 8100000 && vco != 8640000);
+ /*
+ * Use the lower (vco 8640) cdclk values as a
+ * first guess. skl_calc_cdclk() will correct it
+ * if the preferred vco is 8100 instead.
+ */
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
- dev_priv->max_cdclk_freq = 675000;
+ max_cdclk = 617143;
else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
- dev_priv->max_cdclk_freq = 540000;
+ max_cdclk = 540000;
else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
- dev_priv->max_cdclk_freq = 450000;
+ max_cdclk = 432000;
else
- dev_priv->max_cdclk_freq = 337500;
+ max_cdclk = 308571;
+
+ dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROXTON(dev)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev)) {
@@ -5324,264 +5337,313 @@ static void intel_update_cdclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+ dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+ dev_priv->cdclk_pll.ref);
+ else
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->cdclk_freq);
/*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
+ * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+ * Programmng [sic] note: bit[9:2] should be programmed to the number
+ * of cdclk that generates 4MHz reference clock freq which is used to
+ * generate GMBus clock. This will vary with the cdclk freq.
*/
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
- }
+}
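
The value written to GMBUSFREQ_VLV is simply cdclk expressed in MHz, rounded up, consistent with both the quoted spec note and the old "1MHz is correct" comment. For example:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int cdclk_freq = 320000;        /* hypothetical VLV cdclk, kHz */

            printf("GMBUSFREQ = %d\n", DIV_ROUND_UP(cdclk_freq, 1000)); /* 320 */
            return 0;
    }
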
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+ return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
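
The decimal field encodes cdclk in 0.1 MHz steps with a 1 MHz offset, i.e. (cdclk_MHz - 1) * 2. A quick check of the rounding:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    /* .1-MHz fixpoint with -1 MHz offset, as in skl_cdclk_decimal() above */
    static int skl_cdclk_decimal(int cdclk)
    {
            return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
    }

    int main(void)
    {
            /* 337500 kHz -> (337.5 - 1) * 2 = 673; 617143 kHz -> 1232 */
            printf("%d %d\n", skl_cdclk_decimal(337500), skl_cdclk_decimal(617143));
            return 0;
    }
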
-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- uint32_t divider;
- uint32_t ratio;
- uint32_t current_freq;
- int ret;
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk_pll.ref)
+ return 0;
- /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
- switch (frequency) {
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
case 144000:
+ case 288000:
+ case 384000:
+ case 576000:
+ ratio = 60;
+ break;
+ case 624000:
+ ratio = 65;
+ break;
+ }
+
+ return dev_priv->cdclk_pll.ref * ratio;
+}
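
All BXT cdclk frequencies other than the 19.2 MHz bypass derive from one of two DE PLL vco points: 19200 * 60 = 1152000 kHz or 19200 * 65 = 1248000 kHz. broxton_set_cdclk() below then recovers the CD2X divider from the vco/cdclk ratio (8, 4, 3 or 2, i.e. divider 4, 2, 1.5 or 1). The arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int ref = 19200;        /* BXT DE PLL reference, kHz */
            int vco = ref * 60;     /* 1152000 kHz */
            int cdclks[] = { 144000, 288000, 384000, 576000 };

            for (int i = 0; i < 4; i++)
                    printf("cdclk %6d -> vco/cdclk = %d\n",
                           cdclks[i], vco / cdclks[i]);

            printf("cdclk 624000 -> vco/cdclk = %d\n", ref * 65 / 624000); /* 2 */
            return 0;
    }
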
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+ DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+ dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
+ u32 val;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ val &= ~BXT_DE_PLL_RATIO_MASK;
+ val |= BXT_DE_PLL_RATIO(ratio);
+ I915_WRITE(BXT_DE_PLL_CTL, val);
+
+ I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+ /* Timeout 200us */
+ if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+ DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+}
+
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+ u32 val, divider;
+ int vco, ret;
+
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ case 8:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 288000:
+ case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 384000:
+ case 3:
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- ratio = BXT_DE_PLL_RATIO(60);
- break;
- case 576000:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 624000:
+ case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(65);
- break;
- case 19200:
- /*
- * Bypass frequency with DE PLL disabled. Init ratio, divider
- * to suppress GCC warning.
- */
- ratio = 0;
- divider = 0;
break;
default:
- DRM_ERROR("unsupported CDCLK freq %d", frequency);
+ WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+ WARN_ON(vco != 0);
- return;
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ break;
}
- mutex_lock(&dev_priv->rps.hw_lock);
/* Inform power controller of upcoming frequency change */
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
- current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
- /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
- current_freq = current_freq * 500 + 1000;
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /*
- * DE PLL has to be disabled when
- * - setting to 19.2MHz (bypass, PLL isn't used)
- * - before setting to 624MHz (PLL needs toggling)
- * - before setting to any frequency from 624MHz (PLL needs toggling)
- */
- if (frequency == 19200 || frequency == 624000 ||
- current_freq == 624000) {
- I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
- 1))
- DRM_ERROR("timout waiting for DE PLL unlock\n");
- }
-
- if (frequency != 19200) {
- uint32_t val;
-
- val = I915_READ(BXT_DE_PLL_CTL);
- val &= ~BXT_DE_PLL_RATIO_MASK;
- val |= ratio;
- I915_WRITE(BXT_DE_PLL_CTL, val);
-
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
-
- val = I915_READ(CDCLK_CTL);
- val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
- val |= divider;
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- if (frequency >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ if (dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
- val &= ~CDCLK_FREQ_DECIMAL_MASK;
- /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
- val |= (frequency - 1000) / 500;
- I915_WRITE(CDCLK_CTL, val);
- }
+ val = divider | skl_cdclk_decimal(cdclk);
+ /*
+ * FIXME if only the cd2x divider needs changing, it could be done
+ * without shutting off the pipe (if only one pipe is active).
+ */
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(frequency, 25000));
+ DIV_ROUND_UP(cdclk, 25000));
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
intel_update_cdclk(dev_priv->dev);
}
-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
- return false;
+ u32 cdctl, expected;
- /* TODO: Check for a valid CDCLK rate */
+ intel_update_cdclk(dev_priv->dev);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+ goto sanitize;
- return false;
- }
+ /* DPLL okay; verify the cdclock
+ *
+ * Some BIOS versions leave an incorrect decimal frequency value and
+	 * set reserved MBZ bits in CDCLK_CTL at least when exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = I915_READ(CDCLK_CTL);
+ /*
+	 * Let's ignore the pipe field, since the BIOS could have configured
+	 * the dividers either synched to an active pipe or asynchronously
+	 * (PIPE_NONE).
+ */
+ cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+ expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (dev_priv->cdclk_freq >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return false;
- }
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
- return true;
-}
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
- return broxton_cdclk_is_enabled(dev_priv);
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
void broxton_init_cdclk(struct drm_i915_private *dev_priv)
{
- /* check if cd clock is enabled */
- if (broxton_cdclk_is_enabled(dev_priv)) {
- DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
- return;
- }
+ bxt_sanitize_cdclk(dev_priv);
- DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+ return;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
- * - check if setting the max (or any) cdclk freq is really necessary
- * here, it belongs to modeset time
*/
- broxton_set_cdclk(dev_priv, 624000);
-
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0));
+}
- udelay(10);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout!\n");
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+ if (vco == 8640000) {
+ if (max_pixclk > 540000)
+ return 617143;
+ else if (max_pixclk > 432000)
+ return 540000;
+ else if (max_pixclk > 308571)
+ return 432000;
+ else
+ return 308571;
+ } else {
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+ }
}
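
skl_calc_cdclk() picks the smallest cdclk step at or above the required pixel rate, with a separate ladder per vco (the 8640 MHz ladder's odd values are the vco divided by 28, 20, 16 and 14). Example use with a hypothetical ~533 MHz pixel rate:

    #include <stdio.h>

    /* copy of skl_calc_cdclk() from the hunk above, for demonstration */
    static int skl_calc_cdclk(int max_pixclk, int vco)
    {
            if (vco == 8640000) {
                    if (max_pixclk > 540000) return 617143;
                    if (max_pixclk > 432000) return 540000;
                    if (max_pixclk > 308571) return 432000;
                    return 308571;
            }
            if (max_pixclk > 540000) return 675000;
            if (max_pixclk > 450000) return 540000;
            if (max_pixclk > 337500) return 450000;
            return 337500;
    }

    int main(void)
    {
            printf("%d %d\n", skl_calc_cdclk(533000, 8100000),
                   skl_calc_cdclk(533000, 8640000)); /* 540000 540000 */
            return 0;
    }
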
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ u32 val;
- udelay(10);
+ dev_priv->cdclk_pll.ref = 24000;
+ dev_priv->cdclk_pll.vco = 0;
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout!\n");
+ val = I915_READ(LCPLL1_CTL);
+ if ((val & LCPLL_PLL_ENABLE) == 0)
+ return;
- /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
- broxton_set_cdclk(dev_priv, 19200);
-}
+ if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ return;
-static const struct skl_cdclk_entry {
- unsigned int freq;
- unsigned int vco;
-} skl_cdclk_frequencies[] = {
- { .freq = 308570, .vco = 8640 },
- { .freq = 337500, .vco = 8100 },
- { .freq = 432000, .vco = 8640 },
- { .freq = 450000, .vco = 8100 },
- { .freq = 540000, .vco = 8100 },
- { .freq = 617140, .vco = 8640 },
- { .freq = 675000, .vco = 8100 },
-};
+ val = I915_READ(DPLL_CTRL1);
-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
- return (freq - 1000) / 500;
+ if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ return;
+
+ switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8100000;
+ break;
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8640000;
+ break;
+ default:
+ MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ break;
+ }
}
-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
- const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
+ bool changed = dev_priv->skl_preferred_vco_freq != vco;
- if (e->freq == freq)
- return e->vco;
- }
+ dev_priv->skl_preferred_vco_freq = vco;
- return 8100;
+ if (changed)
+ intel_update_max_cdclk(dev_priv->dev);
}
static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int min_freq;
+ int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = I915_READ(CDCLK_CTL);
- val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
- val |= CDCLK_FREQ_337_308;
-
- if (required_vco == 8640)
- min_freq = 308570;
- else
- min_freq = 337500;
-
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+ WARN_ON(vco != 8100000 && vco != 8640000);
+ /* select the minimum CDCLK before enabling DPLL 0 */
+ val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
I915_WRITE(CDCLK_CTL, val);
POSTING_READ(CDCLK_CTL);
@@ -5592,14 +5654,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
* 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
* The modeset code is responsible for the selection of the exact link
* rate later on, with the constraint of choosing a frequency that
- * works with required_vco.
+ * works with vco.
*/
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- if (required_vco == 8640)
+ if (vco == 8640000)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
else
@@ -5613,6 +5675,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+
+ /* We'll want to keep using the current vco from now on. */
+ skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void
+skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+ DRM_ERROR("Couldn't disable DPLL0\n");
+
+ dev_priv->cdclk_pll.vco = 0;
}
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5642,12 +5719,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
return false;
}
-static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
struct drm_device *dev = dev_priv->dev;
u32 freq_select, pcu_ack;
- DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+ WARN_ON((cdclk == 24000) != (vco == 0));
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5734,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
}
/* set CDCLK_CTL */
- switch(freq) {
+ switch (cdclk) {
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5744,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
freq_select = CDCLK_FREQ_540;
pcu_ack = 2;
break;
- case 308570:
+ case 308571:
case 337500:
default:
freq_select = CDCLK_FREQ_337_308;
pcu_ack = 0;
break;
- case 617140:
+ case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
pcu_ack = 3;
break;
}
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
+ if (dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
@@ -5689,52 +5775,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
intel_update_cdclk(dev);
}
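
Note the 308570 -> 308571 and 617140 -> 617143 corrections in the switch above: the values appear to be the 8640 MHz vco divided by 28 and 14, rounded to the nearest kHz rather than truncated. Checking:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            printf("%d %d\n", DIV_ROUND_CLOSEST(8640000, 28),
                   DIV_ROUND_CLOSEST(8640000, 14)); /* 308571 617143 */
            return 0;
    }
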
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
+
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
- /* disable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
-
- udelay(10);
-
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout\n");
-
- /* disable DPLL0 */
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
- unsigned int required_vco;
-
- /* DPLL0 not enabled (happens on early BIOS versions) */
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
- /* enable DPLL0 */
- required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
- skl_dpll0_enable(dev_priv, required_vco);
- }
+ int cdclk, vco;
- /* set CDCLK to the frequency the BIOS chose */
- skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+ skl_sanitize_cdclk(dev_priv);
- /* enable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
+ /*
+ * Use the current vco as our initial
+ * guess as to what the preferred vco is.
+ */
+ if (dev_priv->skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(dev_priv,
+ dev_priv->cdclk_pll.vco);
+ return;
+ }
- udelay(10);
+ vco = dev_priv->skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+ cdclk = skl_calc_cdclk(0, vco);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
+ skl_set_cdclk(dev_priv, cdclk, vco);
}
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- int freq = dev_priv->skl_boot_cdclk;
+ uint32_t cdctl, expected;
/*
	 * check if the pre-os initialized the display
@@ -5744,8 +5819,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
+ intel_update_cdclk(dev_priv->dev);
/* Is PLL enabled and locked ? */
- if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -5754,19 +5831,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+ cdctl = I915_READ(CDCLK_CTL);
+ expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ if (cdctl == expected)
/* All well; nothing to sanitize */
- return false;
+ return;
+
sanitize:
- /*
- * As of now initialize with max cdclk till
- * we get dynamic cdclk support
- * */
- dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
- skl_init_cdclk(dev_priv);
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- /* we did have to sanitize */
- return true;
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -5906,21 +5984,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
-static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
+static int broxton_calc_cdclk(int max_pixclk)
{
- /*
- * FIXME:
- * - remove the guardband, it's not needed on BXT
- * - set 19.2MHz bypass frequency if there are no active pipes
- */
- if (max_pixclk > 576000*9/10)
+ if (max_pixclk > 576000)
return 624000;
- else if (max_pixclk > 384000*9/10)
+ else if (max_pixclk > 384000)
return 576000;
- else if (max_pixclk > 288000*9/10)
+ else if (max_pixclk > 288000)
return 384000;
- else if (max_pixclk > 144000*9/10)
+ else if (max_pixclk > 144000)
return 288000;
else
return 144000;
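
Dropping the *9/10 factor resolves the removed FIXME: the 90% guardband was inherited from older platforms and isn't needed on BXT, so the thresholds now compare the pixel rate against the full cdclk steps. The practical effect, with a hypothetical 550 MHz pixel rate:

    #include <stdio.h>

    int main(void)
    {
            int max_pixclk = 550000;        /* hypothetical pixel rate, kHz */

            /* old guardbanded cutoff: 576000 * 9 / 10 = 518400,
             * so 550 MHz needlessly bumped cdclk to 624 MHz */
            printf("old: %d\n", max_pixclk > 576000 * 9 / 10 ? 624000 : 576000);
            /* new exact cutoff: 576 MHz cdclk is sufficient */
            printf("new: %d\n", max_pixclk > 576000 ? 624000 : 576000);
            return 0;
    }
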
@@ -5963,9 +6035,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5977,20 +6046,15 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, state);
+ int max_pixclk = ilk_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
- broxton_calc_cdclk(dev_priv, max_pixclk);
+ broxton_calc_cdclk(max_pixclk);
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+ intel_state->dev_cdclk = broxton_calc_cdclk(0);
return 0;
}
@@ -6252,7 +6316,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
if (to_intel_plane_state(crtc->primary->state)->visible) {
- WARN_ON(intel_crtc->unpin_work);
+ WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
@@ -6262,8 +6326,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.id, crtc->name);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
crtc->state->active = false;
@@ -6563,10 +6627,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_limit = dev_priv->max_dotclk_freq;
- /* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+ clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -6574,16 +6638,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (intel_crtc_supports_double_wide(crtc) &&
adjusted_mode->crtc_clock > clock_limit) {
- clock_limit *= 2;
+ clock_limit = dev_priv->max_dotclk_freq;
pipe_config->double_wide = true;
}
+ }
- if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
- return -EINVAL;
- }
+ if (adjusted_mode->crtc_clock > clock_limit) {
+ DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
+ return -EINVAL;
}
/*
@@ -6615,76 +6679,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t linkrate;
+ uint32_t cdctl;
- if (!(lcpll1 & LCPLL_PLL_ENABLE))
- return 24000; /* 24MHz is the cd freq with NSSC ref */
+ skl_dpll0_update(dev_priv);
- if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
- return 540000;
+ if (dev_priv->cdclk_pll.vco == 0)
+ return dev_priv->cdclk_pll.ref;
- linkrate = (I915_READ(DPLL_CTRL1) &
- DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+ cdctl = I915_READ(CDCLK_CTL);
- if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
- linkrate == DPLL_CTRL1_LINK_RATE_1080) {
- /* vco 8640 */
+ if (dev_priv->cdclk_pll.vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
- return 308570;
+ return 308571;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
- return 617140;
+ return 617143;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
} else {
- /* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
return 675000;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
}
- /* error case, do as if DPLL0 isn't enabled */
- return 24000;
+ return dev_priv->cdclk_pll.ref;
+}
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ dev_priv->cdclk_pll.ref = 19200;
+ dev_priv->cdclk_pll.vco = 0;
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ return;
+
+ if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+ return;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
+ dev_priv->cdclk_pll.ref;
}
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
- int cdclk;
+ u32 divider;
+ int div, vco;
+
+ bxt_de_pll_update(dev_priv);
- if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
- return 19200;
+ vco = dev_priv->cdclk_pll.vco;
+ if (vco == 0)
+ return dev_priv->cdclk_pll.ref;
- cdclk = 19200 * pll_ratio / 2;
+ divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
- switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+ switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
- return cdclk; /* 576MHz or 624MHz */
+ div = 2;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- return cdclk * 2 / 3; /* 384MHz */
+ div = 3;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
- return cdclk / 2; /* 288MHz */
+ div = 4;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- return cdclk / 4; /* 144MHz */
+ div = 8;
+ break;
+ default:
+ MISSING_CASE(divider);
+ return dev_priv->cdclk_pll.ref;
}
- /* error case, do as if DE PLL isn't enabled */
- return 19200;
+ return DIV_ROUND_CLOSEST(vco, div);
}
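
The rewritten readback derives cdclk from live PLL state instead of a lookup table: vco = ref * ratio from BXT_DE_PLL_CTL, then divide by twice the CD2X divider (DIV_SEL_1/1.5/2/4 -> div 2/3/4/8). A worked 384 MHz case:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int ref = 19200, ratio = 60;    /* as read from BXT_DE_PLL_CTL */
            int vco = ref * ratio;          /* 1152000 kHz */
            int div = 3;                    /* CD2X divider 1.5 -> div 3 */

            printf("cdclk = %d kHz\n", DIV_ROUND_CLOSEST(vco, div)); /* 384000 */
            return 0;
    }
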
static int broadwell_get_display_clock_speed(struct drm_device *dev)
@@ -7063,7 +7149,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
@@ -7487,7 +7573,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7563,7 +7649,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7817,7 +7903,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 48000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -7853,7 +7939,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -7896,7 +7982,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -7930,7 +8016,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -7963,7 +8049,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_chv;
+ const struct intel_limit *limit = &intel_limits_chv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8070,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_vlv;
+ const struct intel_limit *limit = &intel_limits_vlv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8034,7 +8120,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
- intel_clock_t clock;
+ struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8131,7 +8217,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- intel_clock_t clock;
+ struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
@@ -8275,12 +8361,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
+ bool using_ssc_source = false;
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8395,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
can_ssc = true;
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
- has_panel, has_lvds, has_ck505);
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ u32 temp = I915_READ(PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
@@ -8328,9 +8430,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
else
final |= DREF_NONSPREAD_SOURCE_ENABLE;
- final &= ~DREF_SSC_SOURCE_MASK;
final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- final &= ~DREF_SSC1_ENABLE;
+
+ if (!using_ssc_source) {
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_SSC1_ENABLE;
+ }
if (has_panel) {
final |= DREF_SSC_SOURCE_ENABLE;
@@ -8393,7 +8498,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling SSC entirely\n");
+ DRM_DEBUG_KMS("Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -8404,16 +8509,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
+ if (!using_ssc_source) {
+ DRM_DEBUG_KMS("Disabling SSC source\n");
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
}
BUG_ON(val != final);
@@ -8794,7 +8903,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
@@ -8902,10 +9011,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t reduced_clock;
+ struct dpll reduced_clock;
bool has_reduced_clock = false;
struct intel_shared_dpll *pll;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 120000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -9300,6 +9409,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
+ /*
+ * The pipe->pch transcoder and pch transcoder->pll
+	 * mappings are fixed.
+ */
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
@@ -9687,6 +9800,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
cdclk, dev_priv->cdclk_freq);
}
+static int broadwell_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+}
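Calling the new helper with a pixel clock of 0 falls through every rung of the ladder and returns the 337500 kHz minimum, which is why the no-active-pipes path below can use broadwell_calc_cdclk(0) in place of the old hard-coded value:

	/* Sketch: the all-pipes-off cdclk is just the lowest ladder step. */
	int dev_cdclk = broadwell_calc_cdclk(0);	/* == 337500 */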
+
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9698,14 +9823,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- if (max_pixclk > 540000)
- cdclk = 675000;
- else if (max_pixclk > 450000)
- cdclk = 540000;
- else if (max_pixclk > 337500)
- cdclk = 450000;
- else
- cdclk = 337500;
+ cdclk = broadwell_calc_cdclk(max_pixclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9715,7 +9833,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk = intel_state->dev_cdclk = cdclk;
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = 337500;
+ intel_state->dev_cdclk = broadwell_calc_cdclk(0);
return 0;
}
@@ -9730,6 +9848,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
broadwell_set_cdclk(dev, req_cdclk);
}
+static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ const int max_pixclk = ilk_max_pixel_rate(state);
+ int vco = intel_state->cdclk_pll_vco;
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = skl_calc_cdclk(max_pixclk, vco);
+
+ /*
+	 * FIXME move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+ */
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ cdclk = dev_priv->max_cdclk_freq;
+ }
+
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
+
+ return 0;
+}
+
+static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
+ unsigned int req_cdclk = intel_state->dev_cdclk;
+ unsigned int req_vco = intel_state->cdclk_pll_vco;
+
+ skl_set_cdclk(dev_priv, req_cdclk, req_vco);
+}
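The new SKL pair follows the usual atomic check/commit split: the target cdclk and DPLL0 VCO are computed and stashed in the atomic state during check, and written to hardware only during commit, and only when something actually changed. In outline, mirroring the hook assignments and commit-path test later in this patch:

	/* check phase */
	ret = dev_priv->display.modeset_calc_cdclk(state);

	/* commit phase, skipped unless cdclk or VCO changed */
	if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
	    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
		dev_priv->display.modeset_commit_cdclk(state);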
+
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -9850,6 +10009,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enum intel_display_power_domain power_domain;
u32 tmp;
+ /*
+ * The pipe->transcoder mapping is fixed with the exception of the eDP
+ * transcoder handled below.
+ */
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
/*
@@ -10317,10 +10480,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- obj = i915_gem_alloc_object(dev,
+ obj = i915_gem_object_create(dev,
intel_framebuffer_size_for_mode(mode, bpp));
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
@@ -10632,7 +10795,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
int pipe = pipe_config->cpu_transcoder;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
- intel_clock_t clock;
+ struct dpll clock;
int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
@@ -10806,31 +10969,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-void intel_mark_busy(struct drm_device *dev)
+void intel_mark_busy(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (dev_priv->mm.busy)
return;
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
-void intel_mark_idle(struct drm_device *dev)
+void intel_mark_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (!dev_priv->mm.busy)
return;
dev_priv->mm.busy = false;
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_rps_idle(dev->dev_private);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_idle(dev_priv);
intel_runtime_pm_put(dev_priv);
}
@@ -10839,15 +10998,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = intel_crtc->unpin_work;
- intel_crtc->unpin_work = NULL;
+ work = intel_crtc->flip_work;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
if (work) {
- cancel_work_sync(&work->work);
+ cancel_work_sync(&work->mmio_work);
+ cancel_work_sync(&work->unpin_work);
kfree(work);
}
@@ -10858,12 +11018,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
static void intel_unpin_work_fn(struct work_struct *__work)
{
- struct intel_unpin_work *work =
- container_of(__work, struct intel_unpin_work, work);
+ struct intel_flip_work *work =
+ container_of(__work, struct intel_flip_work, unpin_work);
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
+ if (is_mmio_work(work))
+ flush_work(&work->mmio_work);
+
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10882,60 +11045,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
-static void do_intel_finish_page_flip(struct drm_device *dev,
- struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (intel_crtc == NULL)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->unpin_work;
-
- /* Ensure we don't miss a work->pending update ... */
- smp_rmb();
-
- if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return;
- }
-
- page_flip_completed(intel_crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
return !((a - b) & 0x80000000);
}
-static bool page_flip_finished(struct intel_crtc *crtc)
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10977,40 +11094,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
* anyway, we don't really care.
*/
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
- crtc->unpin_work->gtt_offset &&
+ crtc->flip_work->gtt_offset &&
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
- crtc->unpin_work->flip_count);
+ crtc->flip_work->flip_count);
}
-void intel_prepare_page_flip(struct drm_device *dev, int plane)
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ /*
+ * MMIO work completes when vblank is different from
+ * flip_queued_vblank.
+ *
+	 * Reset counter value doesn't matter; this is handled by
+ * i915_wait_request finishing early, so no need to handle
+ * reset here.
+ */
+ return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
+{
+ if (!atomic_read(&work->pending))
+ return false;
+
+ smp_rmb();
+
+ if (is_mmio_work(work))
+ return __pageflip_finished_mmio(crtc, work);
+ else
+ return __pageflip_finished_cs(crtc, work);
+}
+
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
unsigned long flags;
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
/*
* This is called both by irq handlers and the reset code (to complete
* lost pageflips) so needs the full irqsave spinlocks.
- *
- * NB: An MMIO update of the plane base pointer will also
- * generate a page-flip completion irq, i.e. every modeset
- * is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
- atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ !is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
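intel_finish_page_flip_cs() and intel_finish_page_flip_mmio() are identical apart from the is_mmio_work() test; a follow-up could fold them into one helper along these (purely hypothetical) lines:

	/* Hypothetical consolidation, not part of this patch. */
	static void intel_finish_page_flip(struct drm_i915_private *dev_priv,
					   int pipe, bool mmio)
	{
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;
		unsigned long flags;

		if (!crtc)	/* ignore early vblank irqs */
			return;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		work = intel_crtc->flip_work;
		if (work && is_mmio_work(work) == mmio &&
		    pageflip_finished(intel_crtc, work))
			page_flip_completed(intel_crtc);
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}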
+
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
+{
+ work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+
/* Ensure that the work item is consistent when activating it ... */
- smp_wmb();
- atomic_set(&work->pending, INTEL_FLIP_PENDING);
- /* and that it is marked active as soon as the irq could fire. */
- smp_wmb();
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
}
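The smp_mb__before_atomic() pairs with the smp_rmb() in pageflip_finished(): the writer publishes flip_queued_vblank (and the rest of the work item) before setting pending, and the reader must observe pending before it is entitled to read those fields. In barrier shorthand:

	/*
	 * writer (intel_mark_page_flip_active)   reader (pageflip_finished)
	 *
	 * work->flip_queued_vblank = ...;        if (!atomic_read(&work->pending))
	 * smp_mb__before_atomic();                       return false;
	 * atomic_set(&work->pending, 1);         smp_rmb();
	 *                                        ... work fields now stable ...
	 */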
static int intel_gen2_queue_flip(struct drm_device *dev,
@@ -11041,10 +11221,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11073,10 +11252,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11104,7 +11282,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11115,7 +11293,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11139,7 +11316,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11151,7 +11328,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11243,10 +11419,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11264,7 +11439,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
if (engine == NULL)
return true;
- if (INTEL_INFO(engine->dev)->gen < 5)
+ if (INTEL_GEN(engine->i915) < 5)
return false;
if (i915.use_mmio_flip < 0)
@@ -11283,7 +11458,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11335,7 +11510,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
}
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11358,48 +11533,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
POSTING_READ(DSPSURF(intel_crtc->plane));
}
-/*
- * XXX: This is the temporary way to update the plane registers until we get
- * around to using the usual plane update functions for MMIO flips
- */
-static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
+static void intel_mmio_flip_work_func(struct work_struct *w)
{
- struct intel_crtc *crtc = mmio_flip->crtc;
- struct intel_unpin_work *work;
-
- spin_lock_irq(&crtc->base.dev->event_lock);
- work = crtc->unpin_work;
- spin_unlock_irq(&crtc->base.dev->event_lock);
- if (work == NULL)
- return;
-
- intel_mark_page_flip_active(work);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
- skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
- else
- /* use_mmio_flip() retricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *work)
-{
- struct intel_mmio_flip *mmio_flip =
- container_of(work, struct intel_mmio_flip, work);
+ struct intel_flip_work *work =
+ container_of(w, struct intel_flip_work, mmio_work);
+ struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+ to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- if (mmio_flip->req) {
- WARN_ON(__i915_wait_request(mmio_flip->req,
+ if (work->flip_queued_req)
+ WARN_ON(__i915_wait_request(work->flip_queued_req,
false, NULL,
- &mmio_flip->i915->rps.mmioflips));
- i915_gem_request_unreference__unlocked(mmio_flip->req);
- }
+ &dev_priv->rps.mmioflips));
/* For framebuffer backed by dmabuf, wait for fence */
if (obj->base.dma_buf)
@@ -11407,29 +11554,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
false, false,
MAX_SCHEDULE_TIMEOUT) < 0);
- intel_do_mmio_flip(mmio_flip);
- kfree(mmio_flip);
-}
-
-static int intel_queue_mmio_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj)
-{
- struct intel_mmio_flip *mmio_flip;
-
- mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
- if (mmio_flip == NULL)
- return -ENOMEM;
-
- mmio_flip->i915 = to_i915(dev);
- mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
- mmio_flip->crtc = to_intel_crtc(crtc);
- mmio_flip->rotation = crtc->primary->state->rotation;
+ intel_pipe_update_start(crtc);
- INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
- schedule_work(&mmio_flip->work);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_do_mmio_flip(crtc, work->rotation, work);
+ else
+		/* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(crtc, work);
- return 0;
+ intel_pipe_update_end(crtc, work);
}
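With the flip data folded into intel_flip_work, the MMIO path runs as one ordered sequence in the worker above:

	/*
	 * intel_mmio_flip_work_func() sequence:
	 *
	 *  1. __i915_wait_request(work->flip_queued_req) - rendering finished
	 *  2. dma-buf reservation wait (if fb is dmabuf-backed)
	 *  3. intel_pipe_update_start(crtc)              - enter vblank evasion
	 *  4. skl_do_mmio_flip() / ilk_do_mmio_flip()    - write plane registers
	 *  5. intel_pipe_update_end(crtc, work)          - exit evasion, with the
	 *     work item riding along for completion handling
	 */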
static int intel_default_queue_flip(struct drm_device *dev,
@@ -11442,37 +11575,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
return -ENODEV;
}
-static bool __intel_pageflip_stall_check(struct drm_device *dev,
- struct drm_crtc *crtc)
+static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
- u32 addr;
+ u32 addr, vblank;
- if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
- return true;
-
- if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+ if (!atomic_read(&work->pending))
return false;
- if (!work->enable_stall_check)
- return false;
+ smp_rmb();
+ vblank = intel_crtc_get_vblank_counter(intel_crtc);
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
!i915_gem_request_completed(work->flip_queued_req, true))
return false;
- work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+ work->flip_ready_vblank = vblank;
}
- if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+ if (vblank - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
* assume a missed interrupt. */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
else
addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11484,12 +11612,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return addr == work->gtt_offset;
}
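The stall check gives the flip-done interrupt a three-vblank grace period after the render request completes before it suspects a missed interrupt, and even then only declares a stall if the live surface address already matches the queued flip:

	/*
	 * __pageflip_stall_check_cs() decision ladder:
	 *
	 *   work->pending not set         -> no stall (not queued to hw yet)
	 *   flip_queued_req incomplete    -> no stall (still rendering)
	 *   < 3 vblanks since ready       -> no stall (irq may still arrive)
	 *   DSPSURF/DSPADDR == gtt_offset -> flip landed, irq was missed
	 */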
-void intel_check_page_flip(struct drm_device *dev, int pipe)
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -11497,16 +11625,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->unpin_work;
- if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
- WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ work = intel_crtc->flip_work;
+
+ if (work != NULL && !is_mmio_work(work) &&
+ __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ WARN_ONCE(1,
+ "Kicking stuck page flip: queued at %d, now %d\n",
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
page_flip_completed(intel_crtc);
work = NULL;
}
- if (work != NULL &&
- drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
- intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+
+ if (work != NULL && !is_mmio_work(work) &&
+ intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -11522,7 +11654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
@@ -11559,19 +11691,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
work->old_fb = old_fb;
- INIT_WORK(&work->work, intel_unpin_work_fn);
+ INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
- /* We borrow the event spin lock for protecting unpin_work */
+ /* We borrow the event spin lock for protecting flip_work */
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ if (intel_crtc->flip_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
*/
- if (__intel_pageflip_stall_check(dev, crtc)) {
+ if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
page_flip_completed(intel_crtc);
} else {
@@ -11583,7 +11715,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
}
}
- intel_crtc->unpin_work = work;
+ intel_crtc->flip_work = work;
spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11638,6 +11770,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, engine, &request);
+ if (!ret && !request) {
+ request = i915_gem_request_alloc(engine, NULL);
+ ret = PTR_ERR_OR_ZERO(request);
+ }
+
if (ret)
goto cleanup_pending;
}
@@ -11649,38 +11786,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
obj, 0);
work->gtt_offset += intel_crtc->dspaddr_offset;
+ work->rotation = crtc->primary->state->rotation;
if (mmio_flip) {
- ret = intel_queue_mmio_flip(dev, crtc, obj);
- if (ret)
- goto cleanup_unpin;
+ INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- } else {
- if (!request) {
- request = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
- }
+ schedule_work(&work->mmio_work);
+ } else {
+ i915_gem_request_assign(&work->flip_queued_req, request);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- i915_gem_request_assign(&work->flip_queued_req, request);
- }
+ intel_mark_page_flip_active(intel_crtc, work);
- if (request)
i915_add_request_no_flush(request);
+ }
- work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
- work->enable_stall_check = true;
-
- i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+ i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -11706,7 +11833,7 @@ cleanup:
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
@@ -11808,12 +11935,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
- int idx = intel_crtc->base.base.id, ret;
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
+ int ret;
if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
plane->type != DRM_PLANE_TYPE_CURSOR) {
@@ -11834,6 +11961,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* Visibility is calculated as if the crtc was on, but
* after scaler setup everything depends on it being off
* when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11847,11 +11979,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
- plane->base.id, fb ? fb->base.id : -1);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ intel_crtc->base.base.id,
+ intel_crtc->base.name,
+ plane->base.id, plane->name,
+ fb ? fb->base.id : -1);
- DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.id, was_visible, visible,
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ plane->base.id, plane->name,
+ was_visible, visible,
turn_off, turn_on, mode_changed);
if (turn_on) {
@@ -12007,7 +12143,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
} else if (dev_priv->display.compute_intermediate_wm) {
if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+ pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_INFO(dev)->gen >= 9) {
@@ -12142,7 +12278,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
+ crtc->base.base.id, crtc->base.name,
context, pipe_config, pipe_name(crtc->pipe));
DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12243,29 +12380,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
state = to_intel_plane_state(plane->state);
fb = state->base.fb;
if (!fb) {
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
- "disabled, scaler_id = %d\n",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane), state->scaler_id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
+ plane->base.id, plane->name, state->scaler_id);
continue;
}
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane));
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
- fb->base.id, fb->width, fb->height, fb->pixel_format);
- DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
- state->scaler_id,
- state->src.x1 >> 16, state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+ DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
+ plane->base.id, plane->name);
+ DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format));
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst),
+ drm_rect_height(&state->dst));
}
}
@@ -12932,7 +13064,7 @@ verify_crtc_state(struct drm_crtc *crtc,
pipe_config->base.crtc = crtc;
pipe_config->base.state = old_state;
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
@@ -13280,6 +13412,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
intel_state->active_crtcs |= 1 << i;
else
intel_state->active_crtcs &= ~(1 << i);
+
+ if (crtc_state->active != crtc->state->active)
+ intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
}
/*
@@ -13290,9 +13425,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret < 0)
+ return ret;
- if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
ret = intel_modeset_all_pipes(state);
if (ret < 0)
@@ -13316,38 +13459,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
-static void calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
-
- /*
- * Calculate watermark configuration details now that derived
- * plane/crtc state is all properly updated.
- */
- drm_for_each_crtc(crtc, dev) {
- cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
- crtc->state;
-
- if (cstate->active)
- intel_state->wm_config.num_pipes_active++;
- }
- drm_for_each_legacy_plane(plane, dev) {
- pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
- plane->state;
+ struct drm_i915_private *dev_priv = to_i915(dev);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
+ /* Is there platform-specific watermark information to calculate? */
+ if (dev_priv->display.compute_global_watermarks)
+ return dev_priv->display.compute_global_watermarks(state);
- intel_state->wm_config.sprites_enabled = true;
- if (pstate->crtc_w != pstate->src_w >> 16 ||
- pstate->crtc_h != pstate->src_h >> 16)
- intel_state->wm_config.sprites_scaled = true;
- }
+ return 0;
}
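calc_watermark_data() is now a thin dispatch to a per-platform hook; platforms without global watermark state leave compute_global_watermarks NULL and take the early return. A hypothetical minimal hook for such a platform:

	/* Hypothetical no-op hook; platforms with global wm state install
	 * their own implementation instead. */
	static int noop_compute_global_watermarks(struct drm_atomic_state *state)
	{
		return 0;
	}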
/**
@@ -13377,14 +13498,13 @@ static int intel_atomic_check(struct drm_device *dev,
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
- if (!crtc_state->enable) {
- if (needs_modeset(crtc_state))
- any_ms = true;
+ if (!needs_modeset(crtc_state))
continue;
- }
- if (!needs_modeset(crtc_state))
+ if (!crtc_state->enable) {
+ any_ms = true;
continue;
+ }
/* FIXME: For only active_changed we shouldn't need to do any
* state recomputation at all. */
@@ -13394,8 +13514,11 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
ret = intel_modeset_pipe_config(crtc, pipe_config);
- if (ret)
+ if (ret) {
+ intel_dump_pipe_config(to_intel_crtc(crtc),
+ pipe_config, "[failed]");
return ret;
+ }
if (i915.fastboot &&
intel_pipe_config_compare(dev,
@@ -13405,13 +13528,12 @@ static int intel_atomic_check(struct drm_device *dev,
to_intel_crtc_state(crtc_state)->update_pipe = true;
}
- if (needs_modeset(crtc_state)) {
+ if (needs_modeset(crtc_state))
any_ms = true;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
@@ -13431,9 +13553,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
intel_fbc_choose_crtc(dev_priv, state);
- calc_watermark_data(state);
-
- return 0;
+ return calc_watermark_data(state);
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -13495,6 +13615,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
}
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ if (!dev->max_vblank_count)
+ return drm_accurate_vblank_count(&crtc->base);
+
+ return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
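intel_crtc_get_vblank_counter() hides the platforms without a usable hardware frame counter (dev->max_vblank_count == 0): there it falls back to drm_accurate_vblank_count(), so the flip paths can compare queued/ready vblank values the same way everywhere. Typical use, as in intel_mark_page_flip_active() above:

	/* Record the queue time in a counter valid on every platform. */
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(intel_crtc);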
+
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
struct drm_i915_private *dev_priv,
unsigned crtc_mask)
@@ -13597,7 +13727,8 @@ static int intel_atomic_commit(struct drm_device *dev,
}
drm_atomic_helper_swap_state(dev, state);
- dev_priv->wm.config = intel_state->wm_config;
+ dev_priv->wm.distrust_bios_wm = false;
+ dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
if (intel_state->modeset) {
@@ -13653,7 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
if (dev_priv->display.modeset_commit_cdclk &&
- intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
dev_priv->display.modeset_commit_cdclk(state);
intel_modeset_verify_disabled(dev);
@@ -13749,8 +13881,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
state = drm_atomic_state_alloc(dev);
if (!state) {
- DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
+ crtc->base.id, crtc->name);
return;
}
@@ -14006,7 +14138,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_pipe_update_end(intel_crtc);
+ intel_pipe_update_end(intel_crtc, NULL);
}
/**
@@ -14018,9 +14150,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ if (!plane)
+ return;
+
drm_plane_cleanup(plane);
- kfree(intel_plane);
+ kfree(to_intel_plane(plane));
}
const struct drm_plane_funcs intel_plane_funcs = {
@@ -14092,10 +14226,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
- intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane 1%c", pipe_name(pipe));
+ else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c", plane_name(primary->plane));
if (ret)
goto fail;
@@ -14253,7 +14401,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14338,7 +14487,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
goto fail;
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs, NULL);
+ cursor, &intel_crtc_funcs,
+ "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14372,10 +14522,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
return;
fail:
- if (primary)
- drm_plane_cleanup(primary);
- if (cursor)
- drm_plane_cleanup(cursor);
+ intel_plane_destroy(primary);
+ intel_plane_destroy(cursor);
kfree(crtc_state);
kfree(intel_crtc);
}
@@ -14554,6 +14702,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ bool has_edp;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However, since eDP doesn't require DDC
@@ -14563,19 +14713,17 @@ static void intel_setup_outputs(struct drm_device *dev)
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
/* eDP not supported on port D, so don't check VBT */
@@ -15050,12 +15198,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- broadwell_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- broadwell_modeset_calc_cdclk;
- }
+ }
+
+ if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ broadwell_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broadwell_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
@@ -15066,6 +15215,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
broxton_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broxton_modeset_calc_cdclk;
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ skl_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ skl_modeset_calc_cdclk;
}
switch (INTEL_INFO(dev_priv)->gen) {
@@ -15293,7 +15447,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_init_clock_gating(dev);
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
}
/*
@@ -15363,7 +15517,6 @@ retry:
}
/* Write calculated watermark values back */
- to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
@@ -15461,11 +15614,13 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_update_czclk(dev_priv);
- intel_update_rawclk(dev_priv);
intel_update_cdclk(dev);
intel_shared_dpll_init(dev);
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
@@ -15606,8 +15761,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
- DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
- crtc->base.base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
+ crtc->base.base.id, crtc->base.name);
/* Pipe has the wrong plane attached and the plane is active.
* Temporarily change the plane mapping and disable everything
@@ -15775,26 +15930,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (crtc_state->base.active) {
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (IS_BROADWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- } else if (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
- crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
crtc->active ? "enabled" : "disabled");
}
@@ -16025,15 +16178,16 @@ retry:
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
- intel_init_gt_powersave(dev);
+ intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
- intel_setup_overlay(dev);
+ intel_setup_overlay(dev_priv);
/*
* Make sure any fbs we allocated at startup are properly
@@ -16076,7 +16230,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *connector;
- intel_disable_gt_powersave(dev);
+ intel_disable_gt_powersave(dev_priv);
intel_backlight_unregister(dev);
@@ -16106,9 +16260,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev);
+ intel_cleanup_overlay(dev_priv);
- intel_cleanup_gt_powersave(dev);
+ intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev);
}
@@ -16204,9 +16358,8 @@ struct intel_display_error_state {
};
struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
TRANSCODER_A,
@@ -16216,14 +16369,14 @@ intel_display_capture_error_state(struct drm_device *dev)
};
int i;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(dev_priv, i) {
@@ -16239,25 +16392,25 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
error->plane[i].addr = I915_READ(DSPADDR(i));
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
/* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f192f58708c2..f97cd5305e4c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
@@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_1600us |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
@@ -1582,6 +1578,27 @@ found:
&pipe_config->dp_m2_n2);
}
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ if (is_edp(intel_dp) &&
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+ int vco;
+
+ switch (pipe_config->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+
+ to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
+ }
+
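Reading the switch together with the DPLL0 decode in the intel_display.c hunk above: only port clocks of 216000 and 432000 kHz (port_clock/2 of 108000 and 216000) require the 8640 MHz VCO; every other eDP rate stays on the default 8100 MHz VCO.

	/*
	 * port_clock (kHz)  port_clock/2  required DPLL0 VCO (kHz)
	 *     216000           108000          8640000
	 *     432000           216000          8640000
	 *     anything else       -            8100000 (default)
	 */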
if (!HAS_DDI(dev))
intel_dp_set_clock(encoder, pipe_config);
@@ -2460,50 +2477,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp);
}
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
-}
-
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2811,266 +2784,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_dp_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- /* Program Tx lane latency optimal setting*/
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- /* Set the upar bit */
- if (intel_crtc->config->lane_count == 1)
- data = 0x0;
- else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- if (intel_crtc->config->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
- }
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- unsigned int lane_mask =
- intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
- u32 val;
-
intel_dp_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, lane_mask);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
- }
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
/*
@@ -3178,16 +2923,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dport->base.base.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3001,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value, 0);
return 0;
}
-static bool chv_need_uniq_trans_scale(uint8_t train_set)
-{
- return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
- (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-}
-
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
- u32 deemph_reg_value, margin_reg_value, val;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u32 deemph_reg_value, margin_reg_value;
+ bool uniq_trans_scale = false;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- int i;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3032,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
deemph_reg_value = 128;
margin_reg_value = 154;
- /* FIXME extra to set for 1200 */
+ uniq_trans_scale = true;
break;
default:
return 0;
@@ -3364,88 +3084,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
- }
-
- /* Program swing deemph */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- /* Program swing margin */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- if (chv_need_uniq_trans_scale(train_set))
- val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
- else
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, deemph_reg_value,
+ margin_reg_value, uniq_trans_scale);
return 0;
}
@@ -3714,7 +3354,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t rev;
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3410,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
+
+ /* Read the eDP Display control capabilities registers */
+ memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
+ if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd)))
+ DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
}
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3426,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
/* Intermediate frequency support */
- if (is_edp(intel_dp) &&
- (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
- (rev >= 0x03)) { /* eDp v1.4 or higher */
+ if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -5590,14 +5235,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
*
* DRRS saves power by switching to low RR based on usage scenarios.
*
- * eDP DRRS:-
- * The implementation is based on frontbuffer tracking implementation.
- * When there is a disturbance on the screen triggered by user activity or a
- * periodic system activity, DRRS is disabled (RR is changed to high RR).
- * When there is no movement on screen, after a timeout of 1 second, a switch
- * to low RR is made.
- * For integration with frontbuffer tracking code,
- * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ * The implementation is based on the frontbuffer tracking implementation.
+ * When there is a disturbance on screen triggered by user activity or
+ * periodic system activity, DRRS is disabled (RR is switched to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a
+ * switch to low RR is made.
+ *
+ * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
+ * and intel_edp_drrs_flush() are called.
*
* DRRS can be further extended to support other internal panels and also
* the scenario of video playback wherein RR is set based on the rate
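A minimal sketch of the integration this comment describes, assuming frontbuffer bits for the eDP pipe's primary plane (the real call sites live in the frontbuffer tracking code, not in this patch):

	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

	/* user/system activity detected: kick DRRS back to high RR */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
	/* ... rendering completes ... */
	/* flush rearms the 1 second idle timer that drops back to low RR */
	intel_edp_drrs_flush(dev, frontbuffer_bits);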
@@ -5725,8 +5370,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode)
+ if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ }
}
mutex_unlock(&dev->mode_config.mutex);
@@ -5923,9 +5571,9 @@ fail:
return false;
}
-void
-intel_dp_init(struct drm_device *dev,
- i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ i915_reg_t output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5583,7 @@ intel_dp_init(struct drm_device *dev,
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -5945,7 +5593,7 @@ intel_dp_init(struct drm_device *dev,
encoder = &intel_encoder->base;
if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL))
+ DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
@@ -5992,7 +5640,7 @@ intel_dp_init(struct drm_device *dev,
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
@@ -6000,8 +5648,7 @@ err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
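Since intel_dp_init() now reports failure, a caller can fall back to another encoder. A hedged sketch of such a call site (the HDMI fallback shown is illustrative and not part of this hunk):

	/* hypothetical VLV/CHV output setup: try DP, fall back to HDMI */
	if (!intel_dp_init(dev, VLV_DP_B, PORT_B))
		intel_hdmi_init(dev, VLV_HDMI_B, PORT_B);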
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+{
+ uint8_t reg_val = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ &reg_val) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
+ return;
+ }
+ if (enable)
+ reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+ else
+ reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ reg_val) != 1) {
+ DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
+ }
+}
+
+/*
+ * Read the current backlight value from the DPCD register(s), depending
+ * on whether an 8-bit (MSB only) or a 16-bit (MSB and LSB) value is
+ * supported
+ */
+static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t read_val[2] = { 0x0 };
+ uint16_t level = 0;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ &read_val, sizeof(read_val)) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ return 0;
+ }
+ level = read_val[0];
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ level = (read_val[0] << 8 | read_val[1]);
+
+ return level;
+}
+
+/*
+ * Sends the current backlight level over the aux channel, checking whether
+ * an 8-bit or a 16-bit (MSB and LSB) value is in use
+ */
+static void
+intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t vals[2] = { 0x0 };
+
+ vals[0] = level;
+
+ /* Write the MSB and/or LSB */
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+ vals[0] = (level & 0xFF00) >> 8;
+ vals[1] = (level & 0xFF);
+ }
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ vals, sizeof(vals)) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ return;
+ }
+}
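A worked value for the MSB/LSB split above:

	/* level = 0x1234 with the 16-bit BYTE_COUNT cap set:
	 *   vals[0] = (0x1234 & 0xFF00) >> 8 = 0x12  (MSB)
	 *   vals[1] =  0x1234 & 0x00FF       = 0x34  (LSB)
	 * without the cap, vals[0] = (uint8_t)level and vals[1] stays 0;
	 * both bytes are still written in one AUX transfer. */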
+
+static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t dpcd_buf = 0;
+
+ set_aux_backlight_enable(intel_dp, true);
+
+ if ((drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
+ ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
+ DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
+}
+
+static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
+{
+ set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
+}
+
+static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_dp_aux_enable_backlight(connector);
+
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ panel->backlight.max = 0xFFFF;
+ else
+ panel->backlight.max = 0xFF;
+
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_get_backlight(connector);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+static bool
+intel_dp_aux_display_control_capable(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+
+ /* Check the eDP Display control capabilities registers to determine if
+ * the panel can support backlight control over the aux channel
+ */
+ if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+ !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
+ (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
+ DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ return true;
+ }
+ return false;
+}
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (!i915.enable_dpcd_backlight)
+ return -ENODEV;
+
+ if (!intel_dp_aux_display_control_capable(intel_connector))
+ return -ENODEV;
+
+ panel->backlight.setup = intel_dp_aux_setup_backlight;
+ panel->backlight.enable = intel_dp_aux_enable_backlight;
+ panel->backlight.disable = intel_dp_aux_disable_backlight;
+ panel->backlight.set = intel_dp_aux_set_backlight;
+ panel->backlight.get = intel_dp_aux_get_backlight;
+
+ return 0;
+}
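A sketch of the expected consumer, assuming a connector setup path that prefers AUX control when available (the call site is an assumption; only the init function above is added by this patch):

	/* hypothetical panel setup: -ENODEV keeps the PWM backlight hooks */
	if (intel_dp_aux_init_backlight_funcs(intel_connector) == 0)
		DRM_DEBUG_KMS("panel backlight driven over DP AUX\n");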
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..f62ca9a126b3 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST, NULL);
+ DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..288da35572b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+ int i;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ }
+
+ /* Program swing deemph */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /*
+ * The document said it needs to set bit 27 for ch0 and bit 26
+ * for ch1. Might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ if (uniq_trans_scale)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ else
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
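chv_data_lane_soft_reset() assumes sb_lock is already held; a simplified sketch of the assert/deassert pairing (the real callers below take the lock in their own critical sections, with the PLL enable in between):

	mutex_lock(&dev_priv->sb_lock);
	chv_data_lane_soft_reset(encoder, true);	/* assert: lanes held in reset */
	/* ... program clock distribution, swing, stagger ... */
	chv_data_lane_soft_reset(encoder, false);	/* deassert: lanes running */
	mutex_unlock(&dev_priv->sb_lock);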
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ unsigned int lane_mask =
+ intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+ u32 val;
+
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dport->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ }
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i, stagger;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ /* Program Tx lane latency optimal setting */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ /* Set the upar bit */
+ if (intel_crtc->config->lane_count == 1)
+ data = 0x0;
+ else
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ if (intel_crtc->config->lane_count > 2) {
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+ }
+
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, false);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
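Worked values for the stagger ladder above, using the standard DP link rates (port_clock is in kHz):

	/* 540000 (HBR2) > 270000 -> stagger = 0x18
	 * 270000 (HBR)  > 135000 -> stagger = 0xd
	 * 162000 (RBR)  > 135000 -> stagger = 0xd
	 *  96000        >  67500 -> stagger = 0x7 */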
+
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dport->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dport->release_cl2_override = false;
+ }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * Leave the power down bit cleared for at least one
+ * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+ if (tx3_demph)
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->sb_lock);
+}
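The tx3_demph parameter has no user in this hunk; the DP caller above passes 0, which skips the VLV_TX3_DW4 write entirely. A hedged sketch of both call shapes (register values are placeholders):

	/* DP path: no TX3 de-emphasis override */
	vlv_set_phy_signal_level(encoder, demph, preemph, uniqtranscale, 0);
	/* assumed HDMI path: also override TX3 de-emphasis */
	vlv_set_phy_signal_level(encoder, demph, preemph, uniqtranscale, tx3);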
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ /* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Enable clock channels for this port */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ mutex_unlock(&dev_priv->sb_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 3ac705936b04..c0eff1571731 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
if (memcmp(&crtc_state->dpll_hw_state,
&shared_dpll[i].hw_state,
sizeof(crtc_state->dpll_hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, pll->name,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name, pll->name,
shared_dpll[i].crtc_mask,
pll->active_mask);
return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
for (i = range_min; i <= range_max; i++) {
pll = &dev_priv->shared_dplls[i];
if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
return pll;
}
}
@@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
} else {
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
+ if (!pll)
+ return NULL;
+
/* reference the pll */
intel_reference_shared_dpll(pll, crtc_state);
@@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case 162000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
break;
- /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
- results in CDCLK change. Need to handle the change of CDCLK by
- disabling pipes and re-enabling them */
case 108000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
break;
@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI) {
- intel_clock_t best_clock;
+ struct dpll best_clock;
/* Calculate HDMI div */
/*
@@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) intel_dig_port->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
intel_reference_shared_dpll(pll, crtc_state);
@@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
static void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- int cdclk_freq;
-
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- dev_priv->skl_boot_cdclk = cdclk_freq;
- if (skl_sanitize_cdclk(dev_priv))
- DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
- DRM_ERROR("LCPLL1 is disabled\n");
- } else if (!IS_BROXTON(dev_priv)) {
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ uint32_t val = I915_READ(LCPLL_CTL);
+
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a28b4aac1e02..ebe7b3427e2e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -266,7 +266,7 @@ struct intel_connector {
struct intel_dp *mst_port;
};
-typedef struct dpll {
+struct dpll {
/* given values */
int n;
int m1, m2;
@@ -276,7 +276,7 @@ typedef struct dpll {
int vco;
int m;
int p;
-} intel_clock_t;
+};
struct intel_atomic_state {
struct drm_atomic_state base;
@@ -291,17 +291,32 @@ struct intel_atomic_state {
bool dpll_set, modeset;
+ /*
+ * Does this transaction change the pipes that are active? This mask
+ * tracks which CRTCs have changed their active state at the end of
+ * the transaction (not counting the temporary disable during modesets).
+ * This mask should only be non-zero when intel_state->modeset is true,
+ * but the converse is not necessarily true; simply changing a mode may
+ * not flip the final active status of any CRTC.
+ */
+ unsigned int active_pipe_changes;
+
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
+ /* SKL/KBL Only */
+ unsigned int cdclk_pll_vco;
+
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
- struct intel_wm_config wm_config;
/*
* Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks.
*/
bool skip_intermediate_wm;
+
+ /* Gen9+ only */
+ struct skl_wm_values wm_results;
};
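A sketch of how a modeset check might populate the new active_pipe_changes mask (an assumed call site; only the fields are added in this hunk; on i915 the crtc index equals the pipe):

	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(&intel_state->base, crtc, crtc_state, i)
		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);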
struct intel_plane_state {
@@ -405,6 +420,48 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+struct intel_crtc_wm_state {
+ union {
+ struct {
+ /*
+ * Intermediate watermarks; these can be
+ * programmed immediately since they satisfy
+ * both the current configuration we're
+ * switching away from and the new
+ * configuration we're switching to.
+ */
+ struct intel_pipe_wm intermediate;
+
+ /*
+ * Optimal watermarks, programmed post-vblank
+ * when this state is committed.
+ */
+ struct intel_pipe_wm optimal;
+ } ilk;
+
+ struct {
+ /* gen9+ only needs 1-step wm programming */
+ struct skl_pipe_wm optimal;
+
+ /* cached plane data rate */
+ unsigned plane_data_rate[I915_MAX_PLANES];
+ unsigned plane_y_data_rate[I915_MAX_PLANES];
+
+ /* minimum block allocation */
+ uint16_t minimum_blocks[I915_MAX_PLANES];
+ uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ } skl;
+ };
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
+};
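Access sketch for the new per-platform union; program_watermarks() is a hypothetical stand-in, with the generation split following the comments above:

	struct intel_crtc_state *cstate =
		to_intel_crtc_state(crtc->base.state);

	if (INTEL_GEN(dev_priv) >= 9)
		program_watermarks(&cstate->wm.skl.optimal);	/* 1-step */
	else
		program_watermarks(&cstate->wm.ilk.optimal);	/* post-vblank */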
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -558,32 +615,7 @@ struct intel_crtc_state {
/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
- struct {
- /*
- * Optimal watermarks, programmed post-vblank when this state
- * is committed.
- */
- union {
- struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
- } optimal;
-
- /*
- * Intermediate watermarks; these can be programmed immediately
- * since they satisfy both the current configuration we're
- * switching away from and the new configuration we're switching
- * to.
- */
- struct intel_pipe_wm intermediate;
-
- /*
- * Platforms with two-step watermark programming will need to
- * update watermark programming post-vblank to switch from the
- * safe intermediate watermarks to the optimal final
- * watermarks.
- */
- bool need_postvbl_update;
- } wm;
+ struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
@@ -598,14 +630,6 @@ struct vlv_wm_state {
bool cxsr;
};
-struct intel_mmio_flip {
- struct work_struct work;
- struct drm_i915_private *i915;
- struct drm_i915_gem_request *req;
- struct intel_crtc *crtc;
- unsigned int rotation;
-};
-
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -620,7 +644,7 @@ struct intel_crtc {
unsigned long enabled_power_domains;
bool lowfreq_avail;
struct intel_overlay *overlay;
- struct intel_unpin_work *unpin_work;
+ struct intel_flip_work *flip_work;
atomic_t unpin_work_count;
@@ -815,6 +839,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -947,22 +972,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
return dev_priv->plane_to_crtc_mapping[plane];
}
-struct intel_unpin_work {
- struct work_struct work;
+struct intel_flip_work {
+ struct work_struct unpin_work;
+ struct work_struct mmio_work;
+
struct drm_crtc *crtc;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
-#define INTEL_FLIP_INACTIVE 0
-#define INTEL_FLIP_PENDING 1
-#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank;
u32 flip_ready_vblank;
- bool enable_stall_check;
+ unsigned int rotation;
};
struct intel_load_detect_pipe {
@@ -1031,9 +1055,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1112,14 +1136,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
-void intel_mark_idle(struct drm_device *dev);
+void intel_mark_busy(struct drm_i915_private *dev_priv);
+void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1151,6 +1177,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
@@ -1164,14 +1193,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1228,13 +1257,12 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb, int plane,
unsigned int pitch,
unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_i915_private *dev_priv);
void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
@@ -1243,8 +1271,8 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1252,8 +1280,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1284,7 +1312,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1339,12 +1367,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
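Worked values for the moved helper:

	/* lane_count == 1 -> 0xe (lanes 1-3 unused)
	 * lane_count == 2 -> 0xc (lanes 2 and 3 unused)
	 * lane_count == 4 -> 0x0 (all four lanes in use) */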
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
void intel_dsi_init(struct drm_device *dev);
+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev);
@@ -1424,13 +1462,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
@@ -1601,21 +1639,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1623,7 +1660,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+ return i915.enable_rc6;
+}
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
@@ -1635,7 +1676,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
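The intel_dp_unused_lane_mask() helper added above is pure bit arithmetic and can be sanity-checked in isolation. A minimal standalone sketch (userspace harness, not driver code; the function body is copied from the hunk above):

#include <assert.h>

/* Copied from the intel_drv.h hunk above: returns a mask of the DP
 * lanes (out of the 4 physical lanes) left unused by the configured
 * lane count. */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

int main(void)
{
	assert(intel_dp_unused_lane_mask(1) == 0xe); /* lanes 1-3 unused */
	assert(intel_dp_unused_lane_mask(2) == 0xc); /* lanes 2-3 unused */
	assert(intel_dp_unused_lane_mask(4) == 0x0); /* all lanes in use */
	return 0;
}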
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 366ad6c67ce4..c70132aa91d5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
- u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ u32 val;
+
/* Disable DPOunit clock gating, can stall pipe */
- tmp = I915_READ(DSPCLK_GATE_D);
- tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, tmp);
+ val = I915_READ(DSPCLK_GATE_D);
+ val |= DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
/* put device in ready state */
@@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder);
- if (!IS_BROXTON(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
val = I915_READ(DSPCLK_GATE_D);
@@ -1449,7 +1450,7 @@ void intel_dsi_init(struct drm_device *dev)
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- NULL);
+ "DSI %c", port_name(port));
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1473,10 +1474,42 @@ void intel_dsi_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = BIT(PIPE_B);
- if (dev_priv->vbt.dsi.config->dual_link)
+ if (dev_priv->vbt.dsi.config->dual_link) {
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
- else
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+ } else {
intel_dsi->ports = BIT(port);
+ intel_dsi->dcs_backlight_ports = BIT(port);
+ intel_dsi->dcs_cabc_ports = BIT(port);
+ }
+
+ if (!dev_priv->vbt.dsi.config->cabc_supported)
+ intel_dsi->dcs_cabc_ports = 0;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1545,6 +1578,9 @@ void intel_dsi_init(struct drm_device *dev)
goto err;
}
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_dsi_add_properties(intel_connector);
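The dual-link branch above maps the VBT's dl_dcs_backlight_ports and dl_dcs_cabc_ports fields onto port bitmasks with two parallel switch statements. A condensed sketch of that mapping (the helper name dl_dcs_ports_to_mask is hypothetical; the DL_DCS_* values and BIT(PORT_*) usage follow the hunk above):

/* Hypothetical helper condensing the two switch statements above.
 * Unrecognised VBT values fall back to driving both ports, matching
 * the default case in the hunk. */
static u16 dl_dcs_ports_to_mask(u8 vbt_value)
{
	switch (vbt_value) {
	case DL_DCS_PORT_A:
		return BIT(PORT_A);
	case DL_DCS_PORT_C:
		return BIT(PORT_C);
	case DL_DCS_PORT_A_AND_C:
	default:
		return BIT(PORT_A) | BIT(PORT_C);
	}
}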
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
+
+ u16 dcs_backlight_ports;
+ u16 dcs_cabc_ports;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..f0dc427743f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "i915_drv.h"
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define CONTROL_DISPLAY_BCTRL (1 << 5)
+#define CONTROL_DISPLAY_DD (1 << 3)
+#define CONTROL_DISPLAY_BL (1 << 2)
+
+#define POWER_SAVE_OFF (0 << 0)
+#define POWER_SAVE_LOW (1 << 0)
+#define POWER_SAVE_MEDIUM (2 << 0)
+#define POWER_SAVE_HIGH (3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
+
+#define PANEL_PWM_MAX_VALUE 0xFF
+
+static u32 dcs_get_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ break;
+ }
+
+ return data;
+}
+
+static void dcs_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data = level;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ }
+}
+
+static void dcs_disable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ dcs_set_backlight(connector, 0);
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_OFF;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl &= ~CONTROL_DISPLAY_BL;
+ ctrl &= ~CONTROL_DISPLAY_DD;
+ ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+}
+
+static void dcs_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_panel *panel = &connector->panel;
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl |= CONTROL_DISPLAY_BL;
+ ctrl |= CONTROL_DISPLAY_DD;
+ ctrl |= CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_MEDIUM;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ dcs_set_backlight(connector, panel->backlight.level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+ enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+ panel->backlight.level = PANEL_PWM_MAX_VALUE;
+
+ return 0;
+}
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ return -ENODEV;
+
+ if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ return -EINVAL;
+
+ panel->backlight.setup = dcs_setup_backlight;
+ panel->backlight.enable = dcs_enable_backlight;
+ panel->backlight.disable = dcs_disable_backlight;
+ panel->backlight.set = dcs_set_backlight;
+ panel->backlight.get = dcs_get_backlight;
+
+ return 0;
+}
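Both dcs_get_backlight() and dcs_set_backlight() above carry a FIXME about 16-bit brightness: MIPI_DCS_SET_DISPLAY_BRIGHTNESS can carry a two-byte parameter on panels that expose more than 8 bits of range. A hedged sketch of what the write side might look like (this is an assumption, not part of the patch, and the parameter byte order varies between panels):

/* Hypothetical 16-bit variant of dcs_set_backlight(); the patch above
 * only handles 8-bit levels (PANEL_PWM_MAX_VALUE == 0xFF). MSB-first
 * byte order is assumed here and would need checking per panel. */
static void dcs_set_backlight_16(struct intel_dsi *intel_dsi, u16 level)
{
	struct mipi_dsi_device *dsi_device;
	u8 data[2] = { level >> 8, level & 0xff };
	enum port port;

	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
		dsi_device = intel_dsi->dsi_hosts[port]->device;
		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				   data, sizeof(data));
	}
}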
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..f122484bedfc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+#define CHV_GPIO_IDX_START_N 0
+#define CHV_GPIO_IDX_START_E 73
+#define CHV_GPIO_IDX_START_SW 100
+#define CHV_GPIO_IDX_START_SE 198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY 15
+
+#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
+#define CHV_GPIO_GPIOEN (1 << 15)
+#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
+#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
+#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
+#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
+#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define CHV_GPIO_CFGLOCK (1 << 31)
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
- DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- return;
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- port = IOSF_PORT_GPIO_SC;
+ DRM_DEBUG_KMS("SC gpio not supported\n");
+ return;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
@@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ u16 cfg0, cfg1;
+ u16 family_num;
+ u8 port;
+
+ if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+ /* XXX: it's unclear whether 255->57 is part of SE. */
+ gpio_index -= CHV_GPIO_IDX_START_SE;
+ port = CHV_IOSF_PORT_GPIO_SE;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+ gpio_index -= CHV_GPIO_IDX_START_SW;
+ port = CHV_IOSF_PORT_GPIO_SW;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ gpio_index -= CHV_GPIO_IDX_START_E;
+ port = CHV_IOSF_PORT_GPIO_E;
+ } else {
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+ } else {
+ /* XXX: The spec is unclear about CHV GPIO on seq v2 */
+ if (gpio_source != 0) {
+ DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ return;
+ }
+
+ if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+ gpio_index);
+ return;
+ }
+
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+
+ family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+ gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+ cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+ cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+ vlv_iosf_sb_write(dev_priv, port, cfg0,
+ CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..a456f2eb68b6 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -406,6 +406,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
+static char intel_dvo_port_name(i915_reg_t dvo_reg)
+{
+ if (i915_mmio_reg_equal(dvo_reg, DVOA))
+ return 'A';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+ return 'B';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOC))
+ return 'C';
+ else
+ return '?';
+}
+
void intel_dvo_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -428,8 +440,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->attached_connector = intel_connector;
intel_encoder = &intel_dvo->base;
- drm_encoder_init(dev, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type, NULL);
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type,
+ "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..45ee07b888a0 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -374,8 +374,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
+ *
* FIXME: This should be tracked in the plane config eventually
- * instead of queried at runtime for most callers.
+ * instead of queried at runtime for most callers.
*/
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
@@ -740,7 +741,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
- if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+ if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
@@ -827,7 +828,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
bool enable_by_default = IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv);
- if (intel_vgpu_active(dev_priv->dev)) {
+ if (intel_vgpu_active(dev_priv)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..ef8e67690f3d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
- if (!obj) {
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(obj);
goto out;
}
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
- int size, ret;
bool prealloc = false;
+ void *vaddr;
+ int ret;
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
obj = intel_fb->obj;
- size = obj->base.size;
mutex_lock(&dev->struct_mutex);
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ vma = i915_gem_obj_to_ggtt(obj);
+
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
- info->fix.smem_len = size;
+ info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_len = vma->node.size;
- info->screen_base =
- ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
- size);
- if (!info->screen_base) {
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
- ret = -ENOSPC;
+ ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
- info->screen_size = size;
+ info->screen_base = vaddr;
+ info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -488,10 +490,10 @@ retry:
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
connector->name,
- pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
connector->state->crtc->base.id,
+ connector->state->crtc->name,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
@@ -551,6 +553,11 @@ static void intel_fbdev_destroy(struct drm_device *dev,
if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ mutex_unlock(&dev->struct_mutex);
+
drm_framebuffer_remove(&ifbdev->fb->base);
}
}
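The intelfb_alloc() change above follows the tree-wide switch from the NULL-returning i915_gem_alloc_object() to the ERR_PTR-returning i915_gem_object_create(), so callers move from a NULL check with a fixed -ENOMEM to IS_ERR()/PTR_ERR(). A minimal sketch of the resulting idiom (err_fb_alloc() is a hypothetical stand-in for a caller):

#include <linux/err.h>

/* Hypothetical caller showing the ERR_PTR idiom the hunk converts to:
 * a failed allocation encodes its errno in the pointer itself, so the
 * real error is propagated instead of an assumed -ENOMEM. */
static int err_fb_alloc(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... use obj ... */
	return 0;
}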
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..41601c71f529 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -48,14 +48,23 @@ struct drm_i915_gem_request;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*
- * Finally, we also keep a few statistics here, including the number of
- * submissions to each engine, and a record of the last submission failure
- * (if any).
+ * We also keep a few statistics on failures. Ideally, these should all
+ * be zero!
+ * no_wq_space: times that the submission pre-check found no space was
+ * available in the work queue (note, the queue is shared,
+ * not per-engine). It is OK for this to be nonzero, but
+ * it should not be huge!
+ * q_fail: failed to enqueue a work item. This should never happen,
+ * because we check for space beforehand.
+ * b_fail: failed to ring the doorbell. This should never happen, unless
+ * somehow the hardware misbehaves, or maybe if the GuC firmware
+ * crashes? We probably need to reset the GPU to recover.
+ * retcode: errno from last guc_submit()
*/
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
void *client_base; /* first page (only) of above */
- struct intel_context *owner;
+ struct i915_gem_context *owner;
struct intel_guc *guc;
uint32_t priority;
uint32_t ctx_index;
@@ -71,12 +80,13 @@ struct i915_guc_client {
uint32_t wq_tail;
uint32_t unused; /* Was 'wq_head' */
- /* GuC submission statistics & status */
- uint64_t submissions[GUC_MAX_ENGINES_NUM];
- uint32_t q_fail;
+ uint32_t no_wq_space;
+ uint32_t q_fail; /* No longer used */
uint32_t b_fail;
int retcode;
- int spare; /* pad to 32 DWords */
+
+ /* Per-engine counts of GuC submissions */
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
};
enum intel_guc_fw_status {
@@ -138,9 +148,9 @@ struct intel_guc {
};
/* intel_guc_loader.c */
-extern void intel_guc_ucode_init(struct drm_device *dev);
-extern int intel_guc_ucode_load(struct drm_device *dev);
-extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_device *dev);
+extern int intel_guc_setup(struct drm_device *dev);
+extern void intel_guc_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);
@@ -148,10 +158,9 @@ extern int intel_guc_resume(struct drm_device *dev);
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_device *dev);
int i915_guc_submission_enable(struct drm_device *dev);
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq);
+int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_submit(struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
-int i915_guc_wq_check_space(struct i915_guc_client *client);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
#define WQ_RING_TAIL_SHIFT 20
-#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
+#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
+#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
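The new WQ_RING_TAIL_MAX makes the field width explicit: the ring tail is an 11-bit quantity stored at bit 20 of the work-item header, as the reconstructed mask shows:

/* Field layout implied by the definitions above:
 *   WQ_RING_TAIL_MAX  = 0x7FF                    (11 bits, 2^11 qwords)
 *   WQ_RING_TAIL_MASK = 0x7FF << 20 = 0x7FF00000
 * A tail value is packed as (tail << WQ_RING_TAIL_SHIFT) and must not
 * exceed WQ_RING_TAIL_MAX, which the submission path can now check
 * against by name. */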
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..f2b88c7209cb 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,12 @@
*
*/
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
+
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
@@ -100,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
int irqs;
+ u32 tmp;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
@@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+ /*
+	 * If GuC has routed PM interrupts to itself, don't keep the
+	 * redirect bit, but do keep any other interrupts GuC has unmasked.
+ */
+ tmp = I915_READ(GEN6_PMINTRMSK);
+ if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+ dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+ }
}
static u32 get_gttype(struct drm_i915_private *dev_priv)
@@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
return ret;
}
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+ u32 wopcm_size = GUC_WOPCM_TOP;
+
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_BROXTON(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+ return wopcm_size;
+}
+
/*
* Load the GuC firmware blob into the MinuteIA.
*/
@@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+ I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,65 +397,58 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
}
/**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
* @dev: drm device
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
+ * The main action required here is to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that worked,
+ * and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- int retries, err = 0;
+ const char *fw_path = guc_fw->guc_fw_path;
+ int retries, ret, err;
- if (!i915.enable_guc_submission)
- return 0;
-
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+ fw_path,
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
- direct_interrupts_to_host(dev_priv);
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
- return 0;
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
- guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
- return -ENOEXEC;
-
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ /* Loading forbidden, or no firmware to load? */
+ if (!i915.enable_guc_loading) {
+ err = 0;
+ goto fail;
+ } else if (fw_path == NULL || *fw_path == '\0') {
+ if (fw_path && *fw_path == '\0')
+ DRM_INFO("No GuC firmware known for this platform\n");
+ err = -ENODEV;
+ goto fail;
+ }
- switch (guc_fw->guc_fw_fetch_status) {
- case GUC_FIRMWARE_FAIL:
- /* something went wrong :( */
+ /* Fetch failed, or already fetched but failed to load? */
+ if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
err = -EIO;
goto fail;
-
- case GUC_FIRMWARE_NONE:
- case GUC_FIRMWARE_PENDING:
- default:
- /* "can't happen" */
- WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
- guc_fw->guc_fw_path,
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- guc_fw->guc_fw_fetch_status);
- err = -ENXIO;
+ } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+ err = -ENOEXEC;
goto fail;
-
- case GUC_FIRMWARE_SUCCESS:
- break;
}
+ direct_interrupts_to_host(dev_priv);
+
+ guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+ intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
err = i915_guc_submission_init(dev);
if (err)
goto fail;
@@ -448,7 +466,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
*/
err = i915_reset_guc(dev_priv);
if (err) {
- DRM_ERROR("GuC reset failed, err %d\n", err);
+ DRM_ERROR("GuC reset failed: %d\n", err);
goto fail;
}
@@ -459,8 +477,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (--retries == 0)
goto fail;
- DRM_INFO("GuC fw load failed, err %d; will reset and "
- "retry %d more time(s)\n", err, retries);
+ DRM_INFO("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -482,7 +500,6 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
- DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
@@ -490,7 +507,41 @@ fail:
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
- return err;
+ /*
+ * We've failed to load the firmware :(
+ *
+ * Decide whether to disable GuC submission and fall back to
+ * execlist mode, and whether to hide the error by returning
+ * zero or to return -EIO, which the caller will treat as a
+ * nonfatal error (i.e. it doesn't prevent driver load, but
+ * marks the GPU as wedged until reset).
+ */
+ if (i915.enable_guc_loading > 1) {
+ ret = -EIO;
+ } else if (i915.enable_guc_submission > 1) {
+ ret = -EIO;
+ } else {
+ ret = 0;
+ }
+
+ if (err == 0)
+ DRM_INFO("GuC firmware load skipped\n");
+ else if (ret == -EIO)
+ DRM_ERROR("GuC firmware load failed: %d\n", err);
+ else
+ DRM_INFO("GuC firmware load failed: %d\n", err);
+
+ if (i915.enable_guc_submission) {
+ if (fw_path == NULL)
+ DRM_INFO("GuC submission without firmware not supported\n");
+ if (ret == 0)
+ DRM_INFO("Falling back to execlist mode\n");
+ else
+ DRM_ERROR("GuC init failed: %d\n", ret);
+ }
+ i915.enable_guc_submission = 0;
+
+ return ret;
}
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
@@ -552,9 +603,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+ if (size > guc_wopcm_size(dev->dev_private)) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
@@ -617,22 +666,25 @@ fail:
}
/**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
* @dev: drm device
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
*/
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- if (!HAS_GUC_SCHED(dev))
- i915.enable_guc_submission = false;
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev);
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
@@ -640,27 +692,26 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 6;
guc_fw->guc_fw_minor_wanted = 1;
+ } else if (IS_BROXTON(dev)) {
+ fw_path = I915_BXT_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 8;
+ guc_fw->guc_fw_minor_wanted = 7;
} else {
- i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
}
- if (!i915.enable_guc_submission)
- return;
-
guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+ /* Early (and silent) return if GuC loading is disabled */
+ if (!i915.enable_guc_loading)
+ return;
if (fw_path == NULL)
return;
-
- if (*fw_path == '\0') {
- DRM_ERROR("No GuC firmware known for this platform\n");
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+ if (*fw_path == '\0')
return;
- }
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,10 +720,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
}
/**
- * intel_guc_ucode_fini() - clean up all allocated resources
+ * intel_guc_fini() - clean up all allocated resources
* @dev: drm device
*/
-void intel_guc_ucode_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
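The reworked failure path above encodes a policy around the two module parameters: a value above 1 means the feature is required (failure returns -EIO, which marks the GPU as wedged until reset), 1 or 0 means best-effort or off (failure falls back to execlist mode and returns 0), and negative values are resolved to platform defaults in intel_guc_init(). A condensed sketch of that decision (guc_load_error_policy() is a hypothetical name for logic that lives inline in intel_guc_setup()):

/* Hypothetical condensation of the fail: path in intel_guc_setup():
 * decide whether a firmware-load failure is fatal or silently
 * degraded to execlist mode. */
static int guc_load_error_policy(void)
{
	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
		return -EIO;		/* GuC explicitly required */

	i915.enable_guc_submission = 0;	/* fall back to execlists */
	return 0;
}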
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2c3bd9c2573e..eb455ea6ea92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
- /* Enable clock channels for this port */
- mutex_lock(&dev_priv->sb_lock);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_phy_pre_encoder_enable(encoder);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-
- /* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
+ 0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_hdmi_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
-
intel_hdmi_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, 0x0);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- /* Program Tx latency optimal setting */
- for (i = 0; i < 4; i++) {
- /* Set the upar bit */
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ chv_phy_pre_encoder_enable(encoder);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport, 0x0);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2277,7 +1945,7 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..38eeca7a6e72 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev: drm device
+ * @dev_priv: drm_i915_private
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_device *dev,
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
}
if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
*/
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..5c191a1afaaf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -224,10 +224,16 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
-static int intel_lr_context_pin(struct intel_context *ctx,
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
@@ -240,23 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
- WARN_ON(i915.enable_ppgtt == -1);
-
/* On platforms with execlist available, vGPU will only
* support execlist mode, no ring buffer mode.
*/
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
return 1;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 1;
if (enable_execlists == 0)
return 0;
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+ USES_PPGTT(dev_priv) &&
i915.use_mmio_flip >= 0)
return 1;
@@ -266,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (IS_GEN8(dev) || IS_GEN9(dev))
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
engine->idle_lite_restore_wa = ~0;
- engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
- engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
@@ -297,7 +302,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* descriptor for a pinned context
*
* @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
@@ -305,62 +310,41 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB:
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
- * bits 52-63: reserved, may encode the engine ID (for GuC)
+ * bits 32-52: ctx ID, a globally unique tag
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
*/
static void
-intel_lr_context_descriptor_update(struct intel_context *ctx,
+intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- uint64_t lrca, desc;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc;
- lrca = ctx->engine[engine->id].lrc_vma->node.start +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
- desc = engine->ctx_desc_template; /* bits 0-11 */
- desc |= lrca; /* bits 12-31 */
- desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+ desc = engine->ctx_desc_template; /* bits 0-11 */
+ desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+ /* bits 12-31 */
+ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
- ctx->engine[engine->id].lrc_desc = desc;
+ ce->lrc_desc = desc;
}
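With the layout documented above, a pinned context's descriptor is three fields OR'd together, and the context ID no longer aliases the LRCA:

/* Worked sketch of the descriptor composition above (hw_id value
 * hypothetical):
 *   bits  0-11: engine->ctx_desc_template        (GEN8_CTX_* flags)
 *   bits 12-31: ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE
 *               (page-aligned GGTT address, low bits zero)
 *   bits 32-52: (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT, e.g. hw_id = 42
 * Because hw_id is allocated per context rather than derived from the
 * LRCA, the ID stays stable even if the context's backing storage
 * moves in the GGTT. */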
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
return ctx->engine[engine->id].lrc_desc;
}
-/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx: Context to get the ID for
- * @ring: Engine to get the ID for
- *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
- *
- * The context ID is a portion of the context descriptor, so we can
- * just extract the required part from the cached descriptor.
- *
- * Return: 20-bits globally unique context ID.
- */
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
-}
-
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct intel_engine_cs *engine = rq0->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = rq0->i915;
uint64_t desc[2];
if (rq1) {
@@ -442,7 +426,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+ WARN_ON(!intel_irqs_enabled(engine->i915));
/* Try to read in pairs */
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,8 +437,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
- list_move_tail(&req0->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&req0->execlist_link);
+ i915_gem_request_unreference(req0);
req0 = cursor;
} else {
req1 = cursor;
@@ -486,7 +470,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
}
static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
struct drm_i915_gem_request *head_req;
@@ -496,19 +480,16 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
struct drm_i915_gem_request,
execlist_link);
- if (!head_req)
- return 0;
-
- if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
- return 0;
+ if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
+ return 0;
WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
if (--head_req->elsp_submitted > 0)
return 0;
- list_move_tail(&head_req->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&head_req->execlist_link);
+ i915_gem_request_unreference(head_req);
return 1;
}
@@ -517,7 +498,7 @@ static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
u32 *context_id)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status;
read_pointer %= GEN8_CSB_ENTRIES;
@@ -543,7 +524,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status_pointer;
unsigned int read_pointer, write_pointer;
u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +593,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, engine);
-
- i915_gem_request_reference(request);
-
spin_lock_bh(&engine->execlist_lock);
list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +609,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
- list_move_tail(&tail_req->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&tail_req->execlist_link);
+ i915_gem_request_unreference(tail_req);
}
}
+ i915_gem_request_reference(request);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ request->ctx_hw_id = request->ctx->hw_id;
if (num_elements == 0)
execlists_context_unqueue(engine);
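
The coalescing rule above (a still-unsubmitted tail request for the same context is dropped, and the newcomer's tail supersedes its workload) can be modelled in isolation. A toy sketch; the fixed-size queue and all names are illustrative stand-ins for the execlist_queue and the request reference counting:

#include <assert.h>
#include <stdio.h>

struct toy_req {
	int ctx_id;          /* stands in for request->ctx */
	int refs;            /* stands in for the request reference count */
	int elsp_submitted;
};

static struct toy_req *queue[16];
static int qlen;

/* Mirrors the shape of execlists_context_queue(): if the tail request
 * targets the same context and was not yet submitted to the ELSP, it is
 * dropped -- the new request's tail supersedes its workload. */
static void toy_queue(struct toy_req *req)
{
	if (qlen) {
		struct toy_req *tail = queue[qlen - 1];
		if (tail->ctx_id == req->ctx_id) {
			assert(tail->elsp_submitted == 0);
			tail->refs--;          /* unreference the dropped tail */
			qlen--;
		}
	}
	req->refs++;                           /* the queue owns a reference */
	queue[qlen++] = req;
}

int main(void)
{
	struct toy_req a = { .ctx_id = 1 }, b = { .ctx_id = 1 }, c = { .ctx_id = 2 };
	toy_queue(&a);
	toy_queue(&b);  /* same ctx: a is dropped, b supersedes it */
	toy_queue(&c);
	printf("qlen=%d a.refs=%d b.refs=%d\n", qlen, a.refs, b.refs); /* 2 0 1 */
	return 0;
}
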
@@ -698,9 +676,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret = 0;
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ int ret;
+
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
- request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+ if (!ce->state) {
+ ret = execlists_context_deferred_alloc(request->ctx, engine);
+ if (ret)
+ return ret;
+ }
+
+ request->ringbuf = ce->ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -708,16 +700,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
- struct intel_guc *guc = &request->i915->guc;
-
- ret = i915_guc_wq_check_space(guc->execbuf_client);
+ ret = i915_guc_wq_check_space(request);
if (ret)
return ret;
}
- if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->engine);
+ ret = intel_lr_context_pin(request->ctx, engine);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unpin;
+
+ if (!ce->initialised) {
+ ret = engine->init_context(request);
+ if (ret)
+ goto err_unpin;
+
+ ce->initialised = true;
+ }
+
+ /* Note that after this point, we have committed to using
+ * this request as it is being used to track both the
+ * state of engine initialisation and the liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+err_unpin:
+ intel_lr_context_unpin(request->ctx, engine);
return ret;
}
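
The reserved_space bookkeeping above follows a reserve/emit/release pattern: worst-case room for the request's closing commands is set aside before any payload is emitted, so finalising the request cannot run out of ring space. A minimal model, with sizes and names chosen for the sketch rather than taken from the driver:

#include <stdio.h>

#define TOY_REQUEST_SIZE 64   /* stands in for EXECLISTS_REQUEST_SIZE */

struct toy_ring { int space; };
struct toy_request { struct toy_ring *ring; int reserved; };

static int toy_ring_begin(struct toy_request *rq, int bytes)
{
	/* any allocation must leave the reservation untouched */
	if (rq->ring->space < bytes + rq->reserved)
		return -1;              /* would have to wait/expand */
	rq->ring->space -= bytes;
	return 0;
}

static int toy_emit_payload(struct toy_request *rq, int bytes)
{
	return toy_ring_begin(rq, bytes);
}

static void toy_finalise(struct toy_request *rq)
{
	/* the breadcrumb draws from the reservation, which is now released */
	rq->reserved -= TOY_REQUEST_SIZE;
	rq->ring->space -= TOY_REQUEST_SIZE;
}

int main(void)
{
	struct toy_ring ring = { .space = 256 };
	struct toy_request rq = { .ring = &ring, .reserved = TOY_REQUEST_SIZE };

	if (toy_emit_payload(&rq, 128) == 0) {
		toy_finalise(&rq);
		printf("space left: %d\n", ring.space); /* 256-128-64 = 64 */
	}
	return 0;
}
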
@@ -734,7 +749,6 @@ static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->engine;
intel_logical_ring_advance(ringbuf);
@@ -753,40 +767,23 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
if (intel_engine_stopped(engine))
return 0;
- if (engine->last_context != request->ctx) {
- if (engine->last_context)
- intel_lr_context_unpin(engine->last_context, engine);
- if (request->ctx != request->i915->kernel_context) {
- intel_lr_context_pin(request->ctx, engine);
- engine->last_context = request->ctx;
- } else {
- engine->last_context = NULL;
- }
- }
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ request->previous_context = engine->last_context;
+ engine->last_context = request->ctx;
- if (dev_priv->guc.execbuf_client)
- i915_guc_submit(dev_priv->guc.execbuf_client, request);
+ if (i915.enable_guc_submission)
+ i915_guc_submit(request);
else
execlists_context_queue(request);
return 0;
}
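
The previous_context handover above reduces to a small lifetime rule: each request keeps the previously active context pinned until the request itself retires, by which point the hardware has finished any residual writes into the old context image. A hedged sketch of just that rule, with toy types standing in for the real context and request structures:

#include <stdio.h>

struct toy_ctx { const char *name; int pin_count; };

struct toy_request {
	struct toy_ctx *ctx;       /* context this request runs in */
	struct toy_ctx *previous;  /* context live before this request */
};

static struct toy_ctx *last_context;

static void toy_submit(struct toy_request *rq)
{
	rq->previous = last_context;   /* keep the old image pinned */
	last_context = rq->ctx;
}

static void toy_retire(struct toy_request *rq)
{
	if (rq->previous && --rq->previous->pin_count == 0)
		printf("context %s may now be evicted\n", rq->previous->name);
}

int main(void)
{
	struct toy_ctx a = { "A", 1 }, b = { "B", 1 };
	struct toy_request r1 = { .ctx = &a }, r2 = { .ctx = &b };

	toy_submit(&r1);               /* previous = NULL */
	toy_submit(&r2);               /* previous = A, still pinned */
	toy_retire(&r2);               /* only now is A unpinned */
	return 0;
}
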
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
- */
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
- return intel_ring_begin(request, 0);
-}
-
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
@@ -881,28 +878,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *engine)
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req, *tmp;
- struct list_head retired_list;
+ LIST_HEAD(cancel_list);
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
- if (list_empty(&engine->execlist_retired_req_list))
- return;
+ WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
- INIT_LIST_HEAD(&retired_list);
spin_lock_bh(&engine->execlist_lock);
- list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+ list_replace_init(&engine->execlist_queue, &cancel_list);
spin_unlock_bh(&engine->execlist_lock);
- list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
- struct intel_context *ctx = req->ctx;
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
-
- if (ctx_obj && (ctx != req->i915->kernel_context))
- intel_lr_context_unpin(ctx, engine);
-
+ list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -910,7 +897,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
if (!intel_engine_initialized(engine))
@@ -946,25 +933,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0;
}
-static int intel_lr_context_do_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr;
u32 *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ lockdep_assert_held(&ctx->i915->dev->struct_mutex);
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ce->pin_count++)
+ return 0;
+
+ ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
- return ret;
+ goto err;
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ce->state);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
@@ -972,65 +960,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
if (ret)
goto unpin_map;
- ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ i915_gem_context_reference(ctx);
+ ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
- ctx_obj->dirty = true;
+
+ lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+ ce->lrc_reg_state = lrc_reg_state;
+ ce->state->dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- return ret;
+ return 0;
unpin_map:
- i915_gem_object_unpin_map(ctx_obj);
+ i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
- i915_gem_object_ggtt_unpin(ctx_obj);
-
+ i915_gem_object_ggtt_unpin(ce->state);
+err:
+ ce->pin_count = 0;
return ret;
}
-static int intel_lr_context_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- int ret = 0;
+ struct intel_context *ce = &ctx->engine[engine->id];
- if (ctx->engine[engine->id].pin_count++ == 0) {
- ret = intel_lr_context_do_pin(ctx, engine);
- if (ret)
- goto reset_pin_count;
+ lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+ GEM_BUG_ON(ce->pin_count == 0);
- i915_gem_context_reference(ctx);
- }
- return ret;
+ if (--ce->pin_count)
+ return;
-reset_pin_count:
- ctx->engine[engine->id].pin_count = 0;
- return ret;
-}
+ intel_unpin_ringbuffer_obj(ce->ringbuf);
-void intel_lr_context_unpin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ i915_gem_object_unpin_map(ce->state);
+ i915_gem_object_ggtt_unpin(ce->state);
- WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
- if (--ctx->engine[engine->id].pin_count == 0) {
- i915_gem_object_unpin_map(ctx_obj);
- intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- ctx->engine[engine->id].lrc_vma = NULL;
- ctx->engine[engine->id].lrc_desc = 0;
- ctx->engine[engine->id].lrc_reg_state = NULL;
+ ce->lrc_vma = NULL;
+ ce->lrc_desc = 0;
+ ce->lrc_reg_state = NULL;
- i915_gem_context_unreference(ctx);
- }
+ i915_gem_context_unreference(ctx);
}
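
The pin/unpin pair above uses a classic refcounted-acquire shape: only the first pin maps the context, only the last unpin tears it down, and a failed first pin resets pin_count so a retry starts clean. A minimal standalone model of that shape:

#include <stdio.h>

struct toy_ce { int pin_count; int mapped; };

static int toy_do_pin(struct toy_ce *ce)       /* the expensive step; may fail */
{
	ce->mapped = 1;
	return 0;
}

static int toy_pin(struct toy_ce *ce)
{
	int ret;

	if (ce->pin_count++)
		return 0;                      /* already pinned */

	ret = toy_do_pin(ce);
	if (ret)
		ce->pin_count = 0;             /* undo the optimistic ++ */
	return ret;
}

static void toy_unpin(struct toy_ce *ce)
{
	if (--ce->pin_count)
		return;                        /* still in use elsewhere */
	ce->mapped = 0;
}

int main(void)
{
	struct toy_ce ce = { 0, 0 };
	toy_pin(&ce);
	toy_pin(&ce);      /* nested: no extra mapping work */
	toy_unpin(&ce);
	toy_unpin(&ce);    /* last unpin tears down the mapping */
	printf("pin_count=%d mapped=%d\n", ce.pin_count, ce.mapped); /* 0 0 */
	return 0;
}
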
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
int ret, i;
struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
@@ -1111,7 +1086,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1200,7 +1175,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(engine->dev)) {
+ if (IS_BROADWELL(engine->i915)) {
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
@@ -1272,12 +1247,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
uint32_t *offset)
{
int ret;
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1298,12 +1272,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1312,7 +1285,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaClearTdlStateAckDirtyBits:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1331,8 +1304,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1344,11 +1317,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+ engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
PAGE_ALIGN(size));
- if (!engine->wa_ctx.obj) {
+ if (IS_ERR(engine->wa_ctx.obj)) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
- return -ENOMEM;
+ ret = PTR_ERR(engine->wa_ctx.obj);
+ engine->wa_ctx.obj = NULL;
+ return ret;
}
ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -1382,9 +1357,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
WARN_ON(engine->id != RCS);
/* update this when WA for higher Gen are added */
- if (INTEL_INFO(engine->dev)->gen > 9) {
+ if (INTEL_GEN(engine->i915) > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(engine->dev)->gen);
+ INTEL_GEN(engine->i915));
return 0;
}
@@ -1404,7 +1379,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(engine->dev)->gen == 8) {
+ if (IS_GEN8(engine->i915)) {
ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1418,7 +1393,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ } else if (IS_GEN9(engine->i915)) {
ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1444,7 +1419,7 @@ out:
static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
@@ -1453,8 +1428,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned int next_context_status_buffer_hw;
lrc_init_hws(engine);
@@ -1501,8 +1475,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_common_ring(engine);
@@ -1579,7 +1552,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
if (req->ctx->ppgtt &&
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
- !intel_vgpu_active(req->i915->dev)) {
+ !intel_vgpu_active(req->i915)) {
ret = intel_logical_ring_emit_pdps(req);
if (ret)
return ret;
@@ -1607,8 +1580,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1627,8 +1599,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1645,8 +1616,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = request->i915;
uint32_t cmd;
int ret;
@@ -1714,7 +1684,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(engine->dev))
+ if (IS_GEN9(request->i915))
vf_flush_wa = true;
}
@@ -1782,11 +1752,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
*/
#define WA_TAIL_DWORDS 2
-static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
-{
- return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
-}
-
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1802,7 +1767,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
- hws_seqno_address(request->engine) |
+ intel_hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1832,7 +1797,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
+ intel_logical_ring_emit(ringbuf,
+ intel_hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
@@ -1911,7 +1877,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
tasklet_kill(&engine->irq_tasklet);
- dev_priv = engine->dev->dev_private;
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_logical_ring_stop(engine);
@@ -1928,18 +1894,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
+ intel_lr_context_unpin(dev_priv->kernel_context, engine);
engine->idle_lite_restore_wa = 0;
engine->disable_lite_restore_wa = false;
engine->ctx_desc_template = 0;
lrc_destroy_wa_ctx_obj(engine);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
static void
-logical_ring_default_vfuncs(struct drm_device *dev,
- struct intel_engine_cs *engine)
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
engine->init_hw = gen8_init_common_ring;
@@ -1950,7 +1916,7 @@ logical_ring_default_vfuncs(struct drm_device *dev,
engine->emit_bb_start = gen8_emit_bb_start;
engine->get_seqno = gen8_get_seqno;
engine->set_seqno = gen8_set_seqno;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
engine->set_seqno = bxt_a_set_seqno;
}
@@ -1961,6 +1927,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
{
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ init_waitqueue_head(&engine->irq_queue);
}
static int
@@ -1981,32 +1948,68 @@ lrc_setup_hws(struct intel_engine_cs *engine,
return 0;
}
-static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+static const struct logical_ring_info {
+ const char *name;
+ unsigned exec_id;
+ unsigned guc_id;
+ u32 mmio_base;
+ unsigned irq_shift;
+} logical_rings[] = {
+ [RCS] = {
+ .name = "render ring",
+ .exec_id = I915_EXEC_RENDER,
+ .guc_id = GUC_RENDER_ENGINE,
+ .mmio_base = RENDER_RING_BASE,
+ .irq_shift = GEN8_RCS_IRQ_SHIFT,
+ },
+ [BCS] = {
+ .name = "blitter ring",
+ .exec_id = I915_EXEC_BLT,
+ .guc_id = GUC_BLITTER_ENGINE,
+ .mmio_base = BLT_RING_BASE,
+ .irq_shift = GEN8_BCS_IRQ_SHIFT,
+ },
+ [VCS] = {
+ .name = "bsd ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ },
+ [VCS2] = {
+ .name = "bsd2 ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE2,
+ .mmio_base = GEN8_BSD2_RING_BASE,
+ .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ },
+ [VECS] = {
+ .name = "video enhancement ring",
+ .exec_id = I915_EXEC_VEBOX,
+ .guc_id = GUC_VIDEOENHANCE_ENGINE,
+ .mmio_base = VEBOX_RING_BASE,
+ .irq_shift = GEN8_VECS_IRQ_SHIFT,
+ },
+};
+
+static struct intel_engine_cs *
+logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
{
+ const struct logical_ring_info *info = &logical_rings[id];
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_context *dctx = dev_priv->kernel_context;
+ struct intel_engine_cs *engine = &dev_priv->engine[id];
enum forcewake_domains fw_domains;
- int ret;
-
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
- engine->dev = dev;
- INIT_LIST_HEAD(&engine->active_list);
- INIT_LIST_HEAD(&engine->request_list);
- i915_gem_batch_pool_init(dev, &engine->batch_pool);
- init_waitqueue_head(&engine->irq_queue);
- INIT_LIST_HEAD(&engine->buffers);
- INIT_LIST_HEAD(&engine->execlist_queue);
- INIT_LIST_HEAD(&engine->execlist_retired_req_list);
- spin_lock_init(&engine->execlist_lock);
+ engine->id = id;
+ engine->name = info->name;
+ engine->exec_id = info->exec_id;
+ engine->guc_id = info->guc_id;
+ engine->mmio_base = info->mmio_base;
- tasklet_init(&engine->irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
+ engine->i915 = dev_priv;
- logical_ring_init_platform_invariants(engine);
+ /* Intentionally left blank. */
+ engine->buffer = NULL;
fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
RING_ELSP(engine),
@@ -2022,20 +2025,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
engine->fw_domains = fw_domains;
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ INIT_LIST_HEAD(&engine->buffers);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ spin_lock_init(&engine->execlist_lock);
+
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
+
+ logical_ring_init_platform_invariants(engine);
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine, info->irq_shift);
+
+ intel_engine_init_hangcheck(engine);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
+
+ return engine;
+}
+
+static int
+logical_ring_init(struct intel_engine_cs *engine)
+{
+ struct i915_gem_context *dctx = engine->i915->kernel_context;
+ int ret;
+
ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(dctx, engine);
+ ret = execlists_context_deferred_alloc(dctx, engine);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(dctx, engine);
+ ret = intel_lr_context_pin(dctx, engine);
if (ret) {
- DRM_ERROR(
- "Failed to pin and map ringbuffer %s: %d\n",
- engine->name, ret);
+ DRM_ERROR("Failed to pin context for %s: %d\n",
+ engine->name, ret);
goto error;
}
@@ -2055,22 +2082,12 @@ error:
static int logical_render_ring_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
int ret;
- engine->name = "render ring";
- engine->id = RCS;
- engine->exec_id = I915_EXEC_RENDER;
- engine->guc_id = GUC_RENDER_ENGINE;
- engine->mmio_base = RENDER_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, engine);
-
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
engine->init_hw = gen9_init_render_ring;
@@ -2081,8 +2098,6 @@ static int logical_render_ring_init(struct drm_device *dev)
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
- engine->dev = dev;
-
ret = intel_init_pipe_control(engine);
if (ret)
return ret;
@@ -2098,7 +2113,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
- ret = logical_ring_init(dev, engine);
+ ret = logical_ring_init(engine);
if (ret) {
lrc_destroy_wa_ctx_obj(engine);
}
@@ -2108,70 +2123,30 @@ static int logical_render_ring_init(struct drm_device *dev)
static int logical_bsd_ring_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
- engine->name = "bsd ring";
- engine->id = VCS;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE;
- engine->mmio_base = GEN6_BSD_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
- return logical_ring_init(dev, engine);
+ return logical_ring_init(engine);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
-
- engine->name = "bsd2 ring";
- engine->id = VCS2;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE2;
- engine->mmio_base = GEN8_BSD2_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
- return logical_ring_init(dev, engine);
+ return logical_ring_init(engine);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[BCS];
-
- engine->name = "blitter ring";
- engine->id = BCS;
- engine->exec_id = I915_EXEC_BLT;
- engine->guc_id = GUC_BLITTER_ENGINE;
- engine->mmio_base = BLT_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
- return logical_ring_init(dev, engine);
+ return logical_ring_init(engine);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VECS];
-
- engine->name = "video enhancement ring";
- engine->id = VECS;
- engine->exec_id = I915_EXEC_VEBOX;
- engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
- engine->mmio_base = VEBOX_RING_BASE;
+ struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
- logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
-
- return logical_ring_init(dev, engine);
+ return logical_ring_init(engine);
}
/**
@@ -2232,7 +2207,7 @@ cleanup_render_ring:
}
static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
{
u32 rpcs = 0;
@@ -2240,7 +2215,7 @@ make_rpcs(struct drm_device *dev)
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
/*
@@ -2249,24 +2224,24 @@ make_rpcs(struct drm_device *dev)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev)->has_slice_pg) {
+ if (INTEL_INFO(dev_priv)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->slice_total <<
+ rpcs |= INTEL_INFO(dev_priv)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_subslice_pg) {
+ if (INTEL_INFO(dev_priv)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_eu_pg) {
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ if (INTEL_INFO(dev_priv)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
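
To make the RPCS packing concrete, here is a worked sketch. The shift positions below are illustrative stand-ins (the authoritative GEN8_RPCS_* definitions live in i915_reg.h), but the structure matches make_rpcs(): skip pre-Gen9, then fold in each power-gated resource count and set the enable bit:

#include <stdint.h>
#include <stdio.h>

#define RPCS_ENABLE        (1u << 31)
#define RPCS_S_CNT_SHIFT   15
#define RPCS_SS_CNT_SHIFT  8
#define RPCS_EU_MAX_SHIFT  4
#define RPCS_EU_MIN_SHIFT  0

static uint32_t sketch_make_rpcs(int gen, unsigned slices,
                                 unsigned subslices, unsigned eu_per_ss)
{
	uint32_t rpcs = 0;

	if (gen < 9)            /* pre-Gen9: no explicit request needed */
		return 0;

	rpcs |= slices    << RPCS_S_CNT_SHIFT;
	rpcs |= subslices << RPCS_SS_CNT_SHIFT;
	rpcs |= eu_per_ss << RPCS_EU_MIN_SHIFT;
	rpcs |= eu_per_ss << RPCS_EU_MAX_SHIFT;
	rpcs |= RPCS_ENABLE;
	return rpcs;
}

int main(void)
{
	/* e.g. a hypothetical 3x3 part with 8 EUs per subslice */
	printf("rpcs = 0x%08x\n", sketch_make_rpcs(9, 3, 3, 8));
	return 0;
}
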
@@ -2278,9 +2253,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(engine->dev)->gen) {
+ switch (INTEL_GEN(engine->i915)) {
default:
- MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+ MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
case 9:
indirect_ctx_offset =
@@ -2296,13 +2271,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static int
-populate_lr_context(struct intel_context *ctx,
+populate_lr_context(struct i915_gem_context *ctx,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = ctx->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
void *vaddr;
u32 *reg_state;
@@ -2340,7 +2314,7 @@ populate_lr_context(struct intel_context *ctx,
RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- (HAS_RESOURCE_STREAMER(dev) ?
+ (HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
0);
@@ -2429,7 +2403,7 @@ populate_lr_context(struct intel_context *ctx,
if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev));
+ make_rpcs(dev_priv));
}
i915_gem_object_unpin_map(ctx_obj);
@@ -2438,37 +2412,6 @@ populate_lr_context(struct intel_context *ctx,
}
/**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct intel_context *ctx)
-{
- int i;
-
- for (i = I915_NUM_ENGINES; --i >= 0; ) {
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (!ctx_obj)
- continue;
-
- if (ctx == ctx->i915->kernel_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- i915_gem_object_unpin_map(ctx_obj);
- }
-
- WARN_ON(ctx->engine[i].pin_count);
- intel_ringbuffer_free(ringbuf);
- drm_gem_object_unreference(&ctx_obj->base);
- }
-}
-
-/**
* intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
*
@@ -2486,11 +2429,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
int ret = 0;
- WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+ WARN_ON(INTEL_GEN(engine->i915) < 8);
switch (engine->id) {
case RCS:
- if (INTEL_INFO(engine->dev)->gen >= 9)
+ if (INTEL_GEN(engine->i915) >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2507,9 +2450,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
}
/**
- * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * execlists_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
- * @ring: engine to be used with the context.
+ * @engine: engine to be used with the context.
*
* This function can be called more than once, with different engines, if we plan
* to use the context with them. The context backing objects and the ringbuffers
@@ -2519,28 +2462,26 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
*
* Return: non-zero on error.
*/
-
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
+ struct intel_context *ce = &ctx->engine[engine->id];
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
- WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[engine->id].state);
+ WARN_ON(ce->state);
context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_alloc_object(dev, context_size);
- if (!ctx_obj) {
+ ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
+ if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
- return -ENOMEM;
+ return PTR_ERR(ctx_obj);
}
ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
@@ -2555,48 +2496,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
goto error_ringbuf;
}
- ctx->engine[engine->id].ringbuf = ringbuf;
- ctx->engine[engine->id].state = ctx_obj;
+ ce->ringbuf = ringbuf;
+ ce->state = ctx_obj;
+ ce->initialised = engine->init_context == NULL;
- if (ctx != ctx->i915->kernel_context && engine->init_context) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, ctx);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- DRM_ERROR("ring create req: %d\n", ret);
- goto error_ringbuf;
- }
-
- ret = engine->init_context(req);
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("ring init context: %d\n",
- ret);
- goto error_ringbuf;
- }
- }
return 0;
error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[engine->id].ringbuf = NULL;
- ctx->engine[engine->id].state = NULL;
+ ce->ringbuf = NULL;
+ ce->state = NULL;
return ret;
}
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[engine->id].ringbuf;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ struct drm_i915_gem_object *ctx_obj = ce->state;
void *vaddr;
uint32_t *reg_state;
@@ -2615,7 +2537,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ctx_obj);
- ringbuf->head = 0;
- ringbuf->tail = 0;
+ ce->ringbuf->head = 0;
+ ce->ringbuf->tail = 0;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..a8db42a9c50f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -99,30 +99,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
-void intel_lr_context_free(struct intel_context *ctx);
+struct i915_gem_context;
+
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct intel_context *ctx,
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
struct drm_i915_private;
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx);
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+ struct i915_gem_context *ctx);
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-
/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+ int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc53c0dd34d0..62eaa895fe5b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
- if (INTEL_INFO(dev)->gen == 4) {
+ if (IS_GEN4(dev_priv)) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -978,7 +978,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ DRM_MODE_ENCODER_LVDS, "LVDS");
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
goto out;
}
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..b765c75f3fcd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 99e26034ae8d..f6d8a21d2c49 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -240,10 +240,11 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
-static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
u32 main_function, sub_function, scic;
u16 swsci_val;
u32 dslp;
@@ -293,16 +294,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
- pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+ pci_read_config_word(pdev, SWSCI, &swsci_val);
if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
swsci_val |= SWSCI_SCISEL;
swsci_val &= ~SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
swsci_val |= SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
u32 parm = 0;
u32 type = 0;
u32 port;
/* don't care about old stuff for now */
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
parm |= type << (16 + port * 3);
- return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+ return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
@@ -396,27 +397,28 @@ static const struct {
{ PCI_D3cold, 0x04 },
};
-int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state)
{
int i;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
- return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *connector;
struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct drm_device *dev = dev_priv->dev;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return 0;
}
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
return ASLC_ALS_ILLUM_FAILED;
}
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
@@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return ASLC_PFIT_FAILED;
}
-static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
DRM_DEBUG_DRIVER("SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
-static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
return ASLC_BUTTON_ARRAY_FAILED;
}
-static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
return ASLC_CONVERTIBLE_FAILED;
}
-static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
return ASLC_DOCKING_FAILED;
}
-static u32 asle_isct_state(struct drm_device *dev)
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_DRIVER("ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work)
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
- struct drm_device *dev = dev_priv->dev;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -544,40 +545,38 @@ static void asle_work(struct work_struct *work)
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
- aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+ aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
- aslc_stat |= asle_set_backlight(dev, asle->bclp);
+ aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
- aslc_stat |= asle_set_pfit(dev, asle->pfit);
+ aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
- aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
- aslc_stat |= asle_set_supported_rotation_angles(dev,
+ aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
- aslc_stat |= asle_set_button_array(dev, asle->iuer);
+ aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
- aslc_stat |= asle_set_convertible(dev, asle->iuer);
+ aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
- aslc_stat |= asle_set_docking(dev, asle->iuer);
+ aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
- aslc_stat |= asle_isct_state(dev);
+ aslc_stat |= asle_isct_state(dev_priv);
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
@@ -658,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static void intel_didl_outputs(struct drm_device *dev)
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp, max_outputs;
int i = 0;
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
@@ -725,7 +724,7 @@ end:
blind_set:
i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= max_outputs) {
DRM_DEBUG_KMS("More than %u outputs in connector list\n",
@@ -761,9 +760,8 @@ blind_set:
goto end;
}
-static void intel_setup_cadls(struct drm_device *dev)
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
int i = 0;
u32 disp_id;
@@ -780,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev)
} while (++i < 8 && disp_id != 0);
}
-void intel_opregion_init(struct drm_device *dev)
+void intel_opregion_register(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
return;
if (opregion->acpi) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
+ intel_didl_outputs(dev_priv);
+ intel_setup_cadls(dev_priv);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
@@ -808,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev)
}
}
-void intel_opregion_fini(struct drm_device *dev)
+void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -842,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->lid_state = NULL;
}
-static void swsci_setup(struct drm_device *dev)
+static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -854,7 +849,7 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
- if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +860,7 @@ static void swsci_setup(struct drm_device *dev)
* must not call interfaces that are not specifically requested by the
* bios.
*/
- if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
@@ -876,7 +871,7 @@ static void swsci_setup(struct drm_device *dev)
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
- if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
@@ -933,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
- pci_read_config_dword(dev->pdev, ASLS, &asls);
+ pci_read_config_dword(pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev);
+ swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1009,12 @@ err_out:
}
int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
u32 panel_details;
int ret;
- ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
ret);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..eb93f90bb74d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct intel_crtc *crtc;
struct drm_i915_gem_object *vid_bo;
struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr,
+ PAGE_SIZE);
return regs;
}
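
For illustration, a compilable stand-in of the mapping policy in intel_overlay_map_regs() above: chips that need physically addressed overlay registers reuse the permanently mapped phys handle, everyone else maps one write-combined page of the aperture at the overlay's flip address (the kernel does this with io_mapping_map_wc(), which now also takes a size; io_map_wc() below is a made-up stub):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Made-up stub standing in for io_mapping_map_wc(aperture, offset, size). */
static void *io_map_wc(uint32_t offset, size_t size)
{
        (void)offset;
        return malloc(size);
}

struct overlay {
        int needs_physical;     /* OVERLAY_NEEDS_PHYSICAL() result */
        void *phys_vaddr;       /* vaddr of the pre-mapped phys handle */
        uint32_t flip_addr;     /* offset of the register page in the GGTT */
};

static void *map_overlay_regs(struct overlay *o)
{
        if (o->needs_physical)
                return o->phys_vaddr;
        return io_map_wc(o->flip_addr, 4096);
}

int main(void)
{
        struct overlay o = { .needs_physical = 0, .flip_addr = 0x10000 };

        return map_overlay_regs(&o) ? 0 : 1;
}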
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap(regs);
}
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
- WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+ WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- if (IS_I830(dev)) {
+ if (IS_I830(dev_priv)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev_priv->dev->struct_mutex);
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
mask = 0x1f;
shift = 5;
} else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
ret <<= 1;
ret -= 1;
return ret << 2;
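
A standalone restatement of calc_swidthsw() above with a worked number (illustration only, not driver code): the span from offset to offset+width is measured in 32-byte units on gen2 and 64-byte units on later generations, then massaged into the encoding the overlay SWIDTHSW field expects.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swidthsw(bool is_gen2, uint32_t offset, uint32_t width)
{
        uint32_t mask = is_gen2 ? 0x1f : 0x3f;
        uint32_t shift = is_gen2 ? 5 : 6;       /* 32B vs 64B units */
        uint32_t ret = ((offset + width + mask) >> shift) - (offset >> shift);

        if (!is_gen2)
                ret <<= 1;
        return (ret - 1) << 2;
}

int main(void)
{
        /* A 720-unit span at offset 0 on gen3+: 12 units of 64, doubled,
         * minus one, times four: (12 * 2 - 1) << 2 == 92. */
        printf("SWIDTHSW = %u\n", swidthsw(false, 0, 720));
        return 0;
}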
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers __iomem *regs;
bool scale_changed = false;
- struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
oconfig = OCONF_CC_OUT_8BIT;
- if (IS_GEN4(overlay->dev))
+ if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_w;
swidth = params->src_w;
- swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
swidth |= (params->src_w/uv_hscale) << 16;
- tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ tmp_U = calc_swidthsw(dev_priv, params->offset_U,
params->src_w/uv_hscale);
- tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ tmp_V = calc_swidthsw(dev_priv, params->offset_V,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
- intel_frontbuffer_flip(dev,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0;
@@ -852,12 +847,12 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- struct drm_device *dev = overlay->dev;
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 pfit_control = I915_READ(PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
@@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
return 0;
}
-static int check_overlay_src(struct drm_device *dev,
+static int check_overlay_src(struct drm_i915_private *dev_priv,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev,
u32 tmp;
/* check src dimensions */
- if (IS_845G(dev) || IS_I830(dev)) {
+ if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev) || IS_845G(dev))
+ if (IS_I830(dev_priv) || IS_845G(dev_priv))
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN4(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev,
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe(struct drm_device *dev)
+static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
/* i830 doesn't have a panel fitter */
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return -1;
pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
return -1;
/* 965 can place panel fitter on either pipe */
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
return (pfit_control >> 29) & 0x3;
/* older chips can only use pipe 1 */
return 1;
}
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
/* line too wide, i.e. one-line-mode */
if (mode->hdisplay > 1024 &&
- intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
@@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev, put_image_rec, new_bo);
+ ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
if (ret != 0)
goto out_unlock;
params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
return 0;
}
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
goto out_unlock;
if (overlay->active) {
@@ -1371,37 +1365,36 @@ out_unlock:
return ret;
}
-void intel_setup_overlay(struct drm_device *dev)
+void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
int ret;
- if (!HAS_OVERLAY(dev))
+ if (!HAS_OVERLAY(dev_priv))
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->dev->struct_mutex);
if (WARN_ON(dev_priv->overlay))
goto out_free;
- overlay->dev = dev;
+ overlay->i915 = dev_priv;
reg_bo = NULL;
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
- if (reg_bo == NULL)
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
+ reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
if (reg_bo == NULL)
+ reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
+ if (IS_ERR(reg_bo))
goto out_free;
overlay->reg_bo = reg_bo;
- if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
kfree(overlay);
return;
}
-void intel_cleanup_overlay(struct drm_device *dev)
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (!dev_priv->overlay)
return;
@@ -1482,18 +1473,17 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr);
return regs;
}
@@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap_atomic(regs);
}
-
struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
@@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
- else
- error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+ error->base = overlay->flip_addr;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..f0b1602c3258 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1724,6 +1724,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
if (IS_BROXTON(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
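
A sketch of the probe order introduced above, with made-up stand-ins for the real init functions: connector-specific backlight control (DP AUX on eDP, DCS commands on DSI) is tried first, and only on failure does the platform PWM path take over.

#include <stdio.h>

enum connector_type { CONN_EDP, CONN_DSI, CONN_OTHER };

static int dp_aux_backlight_init(void)  { return -1; }  /* stand-in: no luck */
static int dsi_dcs_backlight_init(void) { return -1; }  /* stand-in: no luck */
static void platform_backlight_init(void) { puts("falling back to PWM"); }

static void backlight_init(enum connector_type type)
{
        if (type == CONN_EDP && dp_aux_backlight_init() == 0)
                return;
        if (type == CONN_DSI && dsi_dcs_backlight_init() == 0)
                return;
        platform_backlight_init();
}

int main(void)
{
        backlight_init(CONN_EDP);
        return 0;
}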
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..08274591db7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/cpufreq.h>
+#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
@@ -58,6 +59,10 @@ static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -2012,10 +2017,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev,
- struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(cstate->base.state);
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
@@ -2024,7 +2029,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
- if (WARN_ON(dev_priv->cdclk_freq == 0))
+ if (WARN_ON(intel_state->cdclk == 0))
return 0;
/* The WM are computed with base on how long it takes to fill a single
@@ -2033,7 +2038,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- dev_priv->cdclk_freq);
+ intel_state->cdclk);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
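
Worked example of the linetime formula above (illustration only): linetime is how long one scanline takes, in eighths of a microsecond, computed from the horizontal total and the pixel clock in kHz.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        uint32_t crtc_htotal = 2200;    /* 1080p60 horizontal total */
        uint32_t crtc_clock = 148500;   /* pixel clock in kHz */
        uint32_t linetime = DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8,
                                              crtc_clock);

        /* one line takes ~14.8 us, i.e. 119 in 1/8 us units */
        printf("linetime = %u\n", linetime);
        return 0;
}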
@@ -2146,14 +2151,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
@@ -2309,7 +2314,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.optimal.ilk;
+ pipe_wm = &cstate->wm.ilk.optimal;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct intel_plane_state *ps;
@@ -2352,7 +2357,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
@@ -2391,7 +2396,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate)
{
- struct intel_pipe_wm *a = &newstate->wm.intermediate;
+ struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
int level, max_level = ilk_wm_max_level(dev);
@@ -2400,7 +2405,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
- *a = newstate->wm.optimal.ilk;
+ *a = newstate->wm.ilk.optimal;
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
@@ -2429,7 +2434,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+ if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
newstate->wm.need_postvbl_update = false;
return 0;
@@ -2849,20 +2854,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
- const struct intel_wm_config *config,
- struct skl_ddb_entry *alloc /* out */)
+ struct skl_ddb_entry *alloc, /* out */
+ int *num_active /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
- struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
+ int pipe = to_intel_crtc(for_crtc)->pipe;
- if (!cstate->base.active) {
+ if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
alloc->end = 0;
+ *num_active = hweight32(dev_priv->active_crtcs);
return;
}
+ if (intel_state->active_pipe_changes)
+ *num_active = hweight32(intel_state->active_crtcs);
+ else
+ *num_active = hweight32(dev_priv->active_crtcs);
+
if (IS_BROXTON(dev))
ddb_size = BXT_DDB_SIZE;
else
@@ -2870,25 +2884,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
ddb_size -= 4; /* 4 blocks for bypass path allocation */
- nth_active_pipe = 0;
- for_each_crtc(dev, crtc) {
- if (!to_intel_crtc(crtc)->active)
- continue;
-
- if (crtc == for_crtc)
- break;
-
- nth_active_pipe++;
+ /*
+ * If the state doesn't change the active CRTC's, then there's
+ * no need to recalculate; the existing pipe allocation limits
+ * should remain unchanged. Note that we're safe from racing
+ * commits since any racing commit that changes the active CRTC
+ * list would need to grab _all_ crtc locks, including the one
+ * we currently hold.
+ */
+ if (!intel_state->active_pipe_changes) {
+ *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ return;
}
- pipe_size = ddb_size / config->num_pipes_active;
- alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ nth_active_pipe = hweight32(intel_state->active_crtcs &
+ (drm_crtc_mask(for_crtc) - 1));
+ pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
+ alloc->start = nth_active_pipe * ddb_size / *num_active;
alloc->end = alloc->start + pipe_size;
}
-static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+static unsigned int skl_cursor_allocation(int num_active)
{
- if (config->num_pipes_active == 1)
+ if (num_active == 1)
return 32;
return 8;
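
The hweight32() trick above deserves a worked example: given the active-CRTC bitmask, a pipe's index among the active pipes is the population count of the active bits below its own bit, with no loop over the CRTC list. A compilable sketch using __builtin_popcount() in place of the kernel's hweight32():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t active_crtcs = 0x5;            /* pipes A and C active */
        uint32_t for_crtc_mask = 1u << 2;       /* pipe C */
        int num_active = __builtin_popcount(active_crtcs);
        int nth = __builtin_popcount(active_crtcs & (for_crtc_mask - 1));

        printf("pipe C is active pipe %d of %d\n", nth, num_active);
        return 0;
}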
@@ -2932,6 +2950,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
}
}
+/*
+ * Determines the downscale amount of a plane for the purposes of watermark calculations.
+ * The bspec defines downscale amount as:
+ *
+ * """
+ * Horizontal down scale amount = maximum[1, Horizontal source size /
+ * Horizontal destination size]
+ * Vertical down scale amount = maximum[1, Vertical source size /
+ * Vertical destination size]
+ * Total down scale amount = Horizontal down scale amount *
+ * Vertical down scale amount
+ * """
+ *
+ * Return value is provided in 16.16 fixed point form to retain fractional part.
+ * Caller should take care of dividing & rounding off the value.
+ */
+static uint32_t
+skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+{
+ uint32_t downscale_h, downscale_w;
+ uint32_t src_w, src_h, dst_w, dst_h;
+
+ if (WARN_ON(!pstate->visible))
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ /* n.b., src is 16.16 fixed point, dst is whole integer */
+ src_w = drm_rect_width(&pstate->src);
+ src_h = drm_rect_height(&pstate->src);
+ dst_w = drm_rect_width(&pstate->dst);
+ dst_h = drm_rect_height(&pstate->dst);
+ if (intel_rotation_90_or_270(pstate->base.rotation))
+ swap(dst_w, dst_h);
+
+ downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+ downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+
+ /* Provide result in 16.16 fixed point */
+ return (uint64_t)downscale_w * downscale_h >> 16;
+}
+
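
A worked example of the 16.16 fixed-point math above (illustration only): a 3840x2160 source scanned out at 1920x1080 downscales 2x on each axis, so the total downscale amount is 4.0, i.e. 0x40000 in 16.16 form.

#include <stdint.h>
#include <stdio.h>

#define NO_SCALING (1u << 16)   /* 1.0 in 16.16, as DRM_PLANE_HELPER_NO_SCALING */

static uint32_t max_u32(uint32_t a, uint32_t b)
{
        return a > b ? a : b;
}

int main(void)
{
        uint32_t src_w = 3840u << 16, src_h = 2160u << 16;      /* 16.16 */
        uint32_t dst_w = 1920, dst_h = 1080;                    /* integer */
        uint32_t downscale_w = max_u32(src_w / dst_w, NO_SCALING);
        uint32_t downscale_h = max_u32(src_h / dst_h, NO_SCALING);
        uint32_t total = (uint64_t)downscale_w * downscale_h >> 16;

        printf("total downscale = %#x (%u.%u in 16.16)\n",
               total, total >> 16, total & 0xffff);
        return 0;
}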
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
@@ -2939,7 +2997,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
+ unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+
+ if (!intel_pstate->visible)
+ return 0;
+ if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+ if (y && format != DRM_FORMAT_NV12)
+ return 0;
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2948,17 +3015,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
swap(width, height);
/* for planar format */
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return width * height *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ data_rate = width * height *
+ drm_format_plane_cpp(format, 0);
else /* uv-plane data rate */
- return (width / 2) * (height / 2) *
- drm_format_plane_cpp(fb->pixel_format, 1);
+ data_rate = (width / 2) * (height / 2) *
+ drm_format_plane_cpp(format, 1);
+ } else {
+ /* for packed formats */
+ data_rate = width * height * drm_format_plane_cpp(format, 0);
}
- /* for packed formats */
- return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
+ down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+
+ return (uint64_t)data_rate * down_scale_amount >> 16;
}
/*
@@ -2967,86 +3038,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_crtc *crtc = cstate->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_plane *plane;
const struct intel_plane *intel_plane;
- unsigned int total_data_rate = 0;
+ struct drm_plane_state *pstate;
+ unsigned int rate, total_data_rate = 0;
+ int id;
+ int i;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- const struct drm_plane_state *pstate = intel_plane->base.state;
+ if (WARN_ON(!state))
+ return 0;
- if (pstate->fb == NULL)
- continue;
+ /* Calculate and cache data rate for each plane */
+ for_each_plane_in_state(state, plane, pstate, i) {
+ id = skl_wm_plane_id(to_intel_plane(plane));
+ intel_plane = to_intel_plane(plane);
- if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ if (intel_plane->pipe != intel_crtc->pipe)
continue;
/* packed/uv */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 0);
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 0);
+ intel_cstate->wm.skl.plane_data_rate[id] = rate;
+
+ /* y-plane */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 1);
+ intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
+ }
+
+ /* Calculate CRTC's total data rate from cached values */
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
- /* y-plane */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
+ /* packed/uv */
+ total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
+ total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
}
+ WARN_ON(cstate->plane_mask && total_data_rate == 0);
+
return total_data_rate;
}
-static void
+static uint16_t
+skl_ddb_min_alloc(const struct drm_plane_state *pstate,
+ const int y)
+{
+ struct drm_framebuffer *fb = pstate->fb;
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ uint32_t src_w, src_h;
+ uint32_t min_scanlines = 8;
+ uint8_t plane_bpp;
+
+ if (WARN_ON(!fb))
+ return 0;
+
+ /* For packed formats, no y-plane, return 0 */
+ if (y && fb->pixel_format != DRM_FORMAT_NV12)
+ return 0;
+
+ /* For non-Y-tiled formats, return 8 blocks */
+ if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ return 8;
+
+ src_w = drm_rect_width(&intel_pstate->src) >> 16;
+ src_h = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(src_w, src_h);
+
+ /* Halve UV plane width and height for NV12 */
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+ src_w /= 2;
+ src_h /= 2;
+ }
+
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ else
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ if (intel_rotation_90_or_270(pstate->rotation)) {
+ switch (plane_bpp) {
+ case 1:
+ min_scanlines = 32;
+ break;
+ case 2:
+ min_scanlines = 16;
+ break;
+ case 4:
+ min_scanlines = 8;
+ break;
+ case 8:
+ min_scanlines = 4;
+ break;
+ default:
+ WARN(1, "Unsupported pixel depth %u for rotation",
+ plane_bpp);
+ min_scanlines = 32;
+ }
+ }
+
+ return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
+}
+
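
Worked example of the Y-tile minimum-allocation formula above (illustration only): an unrotated 1920-wide 32bpp Y-tiled plane keeps min_scanlines at 8, so it needs ceil(4*1920*4 / 512) * 8/4 + 3 = 60*2 + 3 = 123 DDB blocks.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint32_t src_w = 1920, plane_bpp = 4;   /* 32bpp */
        uint32_t min_scanlines = 8;             /* unrotated case */
        uint16_t min_blocks = DIV_ROUND_UP(4 * src_w * plane_bpp, 512) *
                              min_scanlines / 4 + 3;

        printf("minimum DDB allocation = %u blocks\n", min_blocks);
        return 0;
}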
+static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
- uint16_t minimum[I915_MAX_PLANES];
- uint16_t y_minimum[I915_MAX_PLANES];
+ uint16_t *minimum = cstate->wm.skl.minimum_blocks;
+ uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
unsigned int total_data_rate;
+ int num_active;
+ int id, i;
+
+ if (WARN_ON(!state))
+ return 0;
- skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
+ if (!cstate->base.active) {
+ ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ return 0;
+ }
+
+ skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
- sizeof(ddb->plane[pipe][PLANE_CURSOR]));
- return;
+ return 0;
}
- cursor_blocks = skl_cursor_allocation(config);
+ cursor_blocks = skl_cursor_allocation(num_active);
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
alloc_size -= cursor_blocks;
- alloc->end -= cursor_blocks;
/* 1. Allocate the minimum required blocks for each active plane */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- int id = skl_wm_plane_id(intel_plane);
+ for_each_plane_in_state(state, plane, pstate, i) {
+ intel_plane = to_intel_plane(plane);
+ id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(plane->state)->visible)
+ if (intel_plane->pipe != pipe)
continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (!to_intel_plane_state(pstate)->visible) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
+ continue;
+ }
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
continue;
+ }
- minimum[id] = 8;
- alloc_size -= minimum[id];
- y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
- alloc_size -= y_minimum[id];
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
+
+ for (i = 0; i < PLANE_CURSOR; i++) {
+ alloc_size -= minimum[i];
+ alloc_size -= y_minimum[i];
}
/*
@@ -3056,21 +3229,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(cstate);
+ if (total_data_rate == 0)
+ return 0;
start = alloc->start;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_plane_state *pstate = intel_plane->base.state;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- continue;
-
- data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
+ data_rate = cstate->wm.skl.plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3081,30 +3249,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
- ddb->plane[pipe][id].start = start;
- ddb->plane[pipe][id].end = start + plane_blocks;
+ /* Leave disabled planes at (0,0) */
+ if (data_rate) {
+ ddb->plane[pipe][id].start = start;
+ ddb->plane[pipe][id].end = start + plane_blocks;
+ }
start += plane_blocks;
/*
* allocation for y_plane part of planar format:
*/
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
- y_data_rate = skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
- y_plane_blocks = y_minimum[id];
- y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
- total_data_rate);
+ y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+
+ y_plane_blocks = y_minimum[id];
+ y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+ total_data_rate);
+ if (y_data_rate) {
ddb->y_plane[pipe][id].start = start;
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
-
- start += y_plane_blocks;
}
+ start += y_plane_blocks;
}
+ return 0;
}
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
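
A compilable sketch of the proportional split performed above, with made-up numbers: each plane receives its minimum plus a share of the remaining DDB blocks weighted by its relative data rate.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t alloc_size = 400;              /* blocks left after cursor */
        uint32_t rate[2] = { 300000, 100000 };  /* relative data rates, 3:1 */
        uint32_t total = rate[0] + rate[1];
        uint16_t minimum[2] = { 36, 36 };       /* skl_ddb_min_alloc() results */
        int id;

        for (id = 0; id < 2; id++) {
                uint16_t blocks = minimum[id] +
                        (uint16_t)((uint64_t)alloc_size * rate[id] / total);

                printf("plane %d: %u blocks\n", id, blocks);
        }
        return 0;
}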
@@ -3161,35 +3331,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
return ret;
}
-static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
- const struct intel_crtc *intel_crtc)
+static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+ struct intel_plane_state *pstate)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ uint64_t adjusted_pixel_rate;
+ uint64_t downscale_amount;
+ uint64_t pixel_rate;
+
+ /* Shouldn't reach here on disabled planes... */
+ if (WARN_ON(!pstate->visible))
+ return 0;
/*
- * If ddb allocation of pipes changed, it may require recalculation of
- * watermarks
+ * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+ * with additional adjustments for plane-specific scaling.
*/
- if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
- return true;
+ adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ downscale_amount = skl_plane_downscale_amount(pstate);
+
+ pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+ WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
- return false;
+ return pixel_rate;
}
-static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
- struct intel_plane *intel_plane,
- uint16_t ddb_allocation,
- int level,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines /* out */)
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ struct intel_plane_state *intel_pstate,
+ uint16_t ddb_allocation,
+ int level,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines, /* out */
+ bool *enabled /* out */)
{
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- struct intel_plane_state *intel_pstate =
- to_intel_plane_state(plane->state);
+ struct drm_plane_state *pstate = &intel_pstate->base;
+ struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3197,20 +3373,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t selected_result;
uint8_t cpp;
uint32_t width = 0, height = 0;
+ uint32_t plane_pixel_rate;
- if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
- return false;
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+ *enabled = false;
+ return 0;
+ }
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
- if (intel_rotation_90_or_270(plane->state->rotation))
+ if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
- cpp, latency);
- method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+ plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+
+ method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+ method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
width,
cpp,
@@ -3224,7 +3404,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
- if (intel_rotation_90_or_270(plane->state->rotation)) {
+ if (intel_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3260,40 +3440,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_blocks++;
}
- if (res_blocks >= ddb_allocation || res_lines > 31)
- return false;
+ if (res_blocks >= ddb_allocation || res_lines > 31) {
+ *enabled = false;
+
+ /*
+ * If there are no valid level 0 watermarks, then we can't
+ * support this display configuration.
+ */
+ if (level) {
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
+ DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
+ to_intel_crtc(cstate->base.crtc)->pipe,
+ skl_wm_plane_id(to_intel_plane(pstate->plane)),
+ res_blocks, ddb_allocation, res_lines);
+
+ return -EINVAL;
+ }
+ }
*out_blocks = res_blocks;
*out_lines = res_lines;
+ *enabled = true;
- return true;
+ return 0;
}
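
A sketch of the failure policy skl_compute_plane_wm() now implements: a level that does not fit its DDB allocation is simply reported as disabled, except level 0; without a valid level 0 the whole display configuration is rejected, which is the new -EINVAL path above. Stand-in code, not the driver's:

#include <stdbool.h>
#include <stdio.h>

static int check_level(int level, unsigned int blocks, unsigned int ddb,
                       bool *enabled)
{
        if (blocks >= ddb) {
                *enabled = false;
                return level ? 0 : -22;         /* -EINVAL */
        }
        *enabled = true;
        return 0;
}

int main(void)
{
        bool en;

        printf("level 3 over budget: %d\n", check_level(3, 200, 119, &en));
        printf("level 0 over budget: %d\n", check_level(0, 200, 119, &en));
        return 0;
}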
-static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
- int level,
- struct skl_wm_level *result)
+static int
+skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct intel_crtc_state *cstate,
+ int level,
+ struct skl_wm_level *result)
{
struct drm_device *dev = dev_priv->dev;
+ struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_plane *plane;
struct intel_plane *intel_plane;
+ struct intel_plane_state *intel_pstate;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
+ int ret;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(result, 0, sizeof(*result));
+
+ for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
int i = skl_wm_plane_id(intel_plane);
+ plane = &intel_plane->base;
+ intel_pstate = NULL;
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
+
+ /*
+ * Note: If we start supporting multiple pending atomic commits
+ * against the same planes/CRTC's in the future, plane->state
+ * will no longer be the correct pre-state to use for the
+ * calculations here and we'll need to change where we get the
+ * 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit
+ * against a CRTC. Even if the plane isn't modified by this
+ * transaction and we don't have a plane lock, we still have
+ * the CRTC's lock, so we know that no other transactions are
+ * racing with us to update it.
+ */
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
+
+ WARN_ON(!intel_pstate->base.fb);
+
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_plane,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i]);
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i],
+ &result->plane_en[i]);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static uint32_t
@@ -3327,21 +3566,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
}
}
-static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
+ int ret;
for (level = 0; level <= max_level; level++) {
- skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ level, &pipe_wm->wm[level]);
+ if (ret)
+ return ret;
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
+
+ return 0;
}
static void skl_compute_wm_results(struct drm_device *dev,
@@ -3421,7 +3665,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
- if (!new->dirty[pipe])
+ if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
+ continue;
+ if (!crtc->active)
continue;
I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3588,87 +3834,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
}
}
-static bool skl_update_pipe_wm(struct drm_crtc *crtc,
- struct skl_ddb_allocation *ddb, /* out */
- struct skl_pipe_wm *pipe_wm /* out */)
+static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm, /* out */
+ bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
+ struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+ int ret;
- skl_allocate_pipe_ddb(cstate, ddb);
- skl_compute_pipe_wm(cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ if (ret)
+ return ret;
if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
- return false;
-
- intel_crtc->wm.active.skl = *pipe_wm;
+ *changed = false;
+ else
+ *changed = true;
- return true;
+ return 0;
}
-static void skl_update_other_pipe_wm(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct skl_wm_values *r)
+static int
+skl_compute_ddb(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *intel_crtc;
- struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+ struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ unsigned realloc_pipes = dev_priv->active_crtcs;
+ int ret;
/*
- * If the WM update hasn't changed the allocation for this_crtc (the
- * crtc we are currently computing the new WM values for), other
- * enabled crtcs will keep the same allocation and we don't need to
- * recompute anything for them.
+ * If this is our first atomic update following hardware readout,
+ * we can't trust the DDB that the BIOS programmed for us. Let's
+ * pretend that all pipes switched active status so that we'll
+ * ensure a full DDB recompute.
*/
- if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
- return;
+ if (dev_priv->wm.distrust_bios_wm)
+ intel_state->active_pipe_changes = ~0;
/*
- * Otherwise, because of this_crtc being freshly enabled/disabled, the
- * other active pipes need new DDB allocation and WM values.
+ * If the modeset changes which CRTC's are active, we need to
+ * recompute the DDB allocation for *all* active pipes, even
+ * those that weren't otherwise being modified in any way by this
+ * atomic commit. Due to the shrinking of the per-pipe allocations
+ * when new active CRTC's are added, it's possible for a pipe that
+ * we were already using and aren't changing at all here to suddenly
+ * become invalid if its DDB needs exceed its new allocation.
+ *
+ * Note that if we wind up doing a full DDB recompute, we can't let
+ * any other display updates race with this transaction, so we need
+ * to grab the lock on *all* CRTC's.
*/
- for_each_intel_crtc(dev, intel_crtc) {
- struct skl_pipe_wm pipe_wm = {};
- bool wm_changed;
-
- if (this_crtc->pipe == intel_crtc->pipe)
- continue;
-
- if (!intel_crtc->active)
- continue;
+ if (intel_state->active_pipe_changes) {
+ realloc_pipes = ~0;
+ intel_state->wm_results.dirty_pipes = ~0;
+ }
- wm_changed = skl_update_pipe_wm(&intel_crtc->base,
- &r->ddb, &pipe_wm);
+ for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+ struct intel_crtc_state *cstate;
- /*
- * If we end up re-computing the other pipe WM values, it's
- * because it was really needed, so we expect the WM values to
- * be different.
- */
- WARN_ON(!wm_changed);
+ cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
- skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
- r->dirty[intel_crtc->pipe] = true;
+ ret = skl_allocate_pipe_ddb(cstate, ddb);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
-static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+static int
+skl_compute_wm(struct drm_atomic_state *state)
{
- watermarks->wm_linetime[pipe] = 0;
- memset(watermarks->plane[pipe], 0,
- sizeof(uint32_t) * 8 * I915_MAX_PLANES);
- memset(watermarks->plane_trans[pipe],
- 0, sizeof(uint32_t) * I915_MAX_PLANES);
- watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_wm_values *results = &intel_state->wm_results;
+ struct skl_pipe_wm *pipe_wm;
+ bool changed = false;
+ int ret, i;
- /* Clear ddb entries for pipe */
- memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
- memset(&watermarks->ddb.plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.y_plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
- sizeof(struct skl_ddb_entry));
+ /*
+ * If this transaction isn't actually touching any CRTC's, don't
+ * bother with watermark calculation. Note that if we pass this
+ * test, we're guaranteed to hold at least one CRTC state mutex,
+ * which means we can safely use values like dev_priv->active_crtcs
+ * since any racing commits that want to update them would need to
+ * hold _all_ CRTC state mutexes.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i)
+ changed = true;
+ if (!changed)
+ return 0;
+
+ /* Clear all dirty flags */
+ results->dirty_pipes = 0;
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate WM's for all pipes that are part of this transaction.
+ * Note that the DDB allocation above may have added more CRTC's that
+ * weren't otherwise being modified (and set bits in dirty_pipes) if
+ * pipe allocations had to change.
+ *
+ * FIXME: Now that we're doing this in the atomic check phase, we
+ * should allow skl_update_pipe_wm() to return failure in cases where
+ * no suitable watermark values can be found.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(cstate);
+
+ pipe_wm = &intel_cstate->wm.skl.optimal;
+ ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
+ &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ results->dirty_pipes |= drm_crtc_mask(crtc);
+
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ /* This pipe's WM's did not change */
+ continue;
+
+ intel_cstate->update_wm_pre = true;
+ skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
+ }
+ return 0;
}
static void skl_update_wm(struct drm_crtc *crtc)
@@ -3678,26 +3981,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
-
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- /* Clear all dirty flags */
- memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
-
- skl_clear_wm(results, intel_crtc->pipe);
-
- if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
- skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
- results->dirty[intel_crtc->pipe] = true;
+ intel_crtc->wm.active.skl = *pipe_wm;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
- skl_update_other_pipe_wm(dev, crtc, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
/* store the new configuration */
dev_priv->wm.skl_hw = *results;
+
+ mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3757,7 +4056,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -3769,7 +4068,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_lock(&dev_priv->wm.wm_mutex);
if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
}
mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3826,7 +4125,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
+ struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
@@ -3849,7 +4148,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- hw->dirty[pipe] = true;
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
active->linetime = hw->wm_linetime[pipe];
@@ -3883,6 +4182,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
skl_ddb_get_hw_state(dev_priv, ddb);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
skl_pipe_wm_get_hw_state(crtc);
+
+ if (dev_priv->active_crtcs) {
+ /* Fully recompute DDB on first atomic commit */
+ dev_priv->wm.distrust_bios_wm = true;
+ } else {
+ /* Easy/common case; just sanitize DDB now if everything off */
+ memset(ddb, 0, sizeof(*ddb));
+ }
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
@@ -3892,7 +4199,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+ struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
enum pipe pipe = intel_crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -4169,9 +4476,8 @@ DEFINE_SPINLOCK(mchdev_lock);
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
assert_spin_locked(&mchdev_lock);
@@ -4193,9 +4499,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
@@ -4252,7 +4557,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
- ironlake_set_drps(dev, fstart);
+ ironlake_set_drps(dev_priv, fstart);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4263,9 +4568,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
@@ -4280,7 +4584,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->ips.fstart);
+ ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4424,12 +4728,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
return;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
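
The comment above the function explains why gen6_set_rps() must run even when only the min/max range moved: the frequency request and the interrupt limits are separate pieces of hardware state. A compilable sketch of that split, with made-up register encodings rather than the real GEN6_RPNSWREQ/GEN6_RP_INTERRUPT_LIMITS layouts:

#include <stdio.h>

struct rps {
        unsigned int cur, min, max;             /* frequencies, in hw units */
        unsigned int req_reg, limits_reg;       /* stand-in "registers" */
};

static void set_rps(struct rps *rps, unsigned int val)
{
        if (val != rps->cur) {                  /* request only on change */
                rps->req_reg = val;
                rps->cur = val;
        }
        /* limits always refreshed: the range may move independently */
        rps->limits_reg = rps->max << 8 | rps->min;
}

int main(void)
{
        struct rps rps = { .cur = 6, .min = 3, .max = 11 };

        set_rps(&rps, 9);
        printf("req=%u limits=%#x\n", rps.req_reg, rps.limits_reg);
        return 0;
}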
@@ -4442,10 +4744,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -4467,15 +4769,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
-static void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
@@ -4508,7 +4808,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
/* Wake up the media well, as that takes a lot less
* power than the Render well. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
- valleyview_set_rps(dev_priv->dev, val);
+ valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
@@ -4526,14 +4826,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
@@ -4581,49 +4879,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->rps.client_lock);
}
-void intel_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev, val);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ valleyview_set_rps(dev_priv, val);
else
- gen6_set_rps(dev, val);
+ gen6_set_rps(dev_priv, val);
}
-static void gen9_disable_rc6(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN9_PG_ENABLE, 0);
}
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* We're doing forcewake before disabling RC6,
* this is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4633,15 +4921,15 @@ static void valleyview_disable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
mode = 0;
}
- if (HAS_RC6p(dev))
+ if (HAS_RC6p(dev_priv))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
@@ -4652,9 +4940,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
-static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
@@ -4695,16 +4982,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
return enable_rc6;
}
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
/* No RC6 before Ironlake, and the ilk code is gone. */
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return 0;
if (!enable_rc6)
return 0;
- if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+ if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
@@ -4713,7 +5000,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (HAS_RC6p(dev))
+ if (HAS_RC6p(dev_priv))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
@@ -4726,20 +5013,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
return enable_rc6 & mask;
}
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
return INTEL_RC6_ENABLE;
}
-int intel_enable_rc6(const struct drm_device *dev)
-{
- return i915.enable_rc6;
-}
-
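Aside: with the wrapper above removed, sanitize_rc6_option() is presumably applied once to the module parameter at init time, and the parameterless intel_enable_rc6() used by the call sites below just reads the sanitized value. A sketch under that assumption (the init-time call site is not shown in this hunk):

	/* hypothetical init-time step, before any RC6 enabling runs */
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* later, as at the call sites below */
	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;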
-static void gen6_init_rps_frequencies(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t rp_state_cap;
u32 ddcc_status = 0;
int ret;
@@ -4747,7 +5028,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4763,8 +5044,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4776,7 +5057,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
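Worked numbers (not from the patch) to make the units concrete: a raw RP0 field of 22 reads as 22 * 50 MHz = 1100 MHz on most platforms; on SKL/KBL it is rescaled to 22 * GEN9_FREQ_SCALER = 66 units of 16.66 MHz, the same 1100 MHz expressed in the hardware's native granularity (GEN9_FREQ_SCALER being 3, as 50 / 16.66 is roughly 3).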
@@ -4793,7 +5074,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
if (dev_priv->rps.min_freq_softlimit == 0) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dev_priv->rps.min_freq_softlimit =
max_t(int, dev_priv->rps.efficient_freq,
intel_freq_opcode(dev_priv, 450));
@@ -4804,16 +5085,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_device *dev)
+static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
/*
* BIOS could leave the HW Turbo enabled, so we need to explicitly
* clear out the Control register just to avoid inconsistency
@@ -4823,7 +5102,7 @@ static void gen9_enable_rps(struct drm_device *dev)
* if the Turbo is left enabled in the Control register, as the
* Up/Down interrupts would remain masked.
*/
- gen9_disable_rps(dev);
+ gen9_disable_rps(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4842,14 +5121,13 @@ static void gen9_enable_rps(struct drm_device *dev)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen9_enable_rc6(struct drm_device *dev)
+static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -4866,7 +5144,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 2b: Program RC6 thresholds.*/
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4875,7 +5153,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC_UCODE(dev))
+ if (HAS_GUC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4885,12 +5163,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -4906,19 +5184,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if (NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
}
-static void gen8_enable_rps(struct drm_device *dev)
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -4933,7 +5209,7 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4942,16 +5218,16 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- intel_print_rc6_info(dev, rc6_mask);
- if (IS_BROADWELL(dev))
+ intel_print_rc6_info(dev_priv, rc6_mask);
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
@@ -4992,14 +5268,13 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen6_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
@@ -5026,7 +5301,7 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5042,7 +5317,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5050,12 +5325,12 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
- rc6_mode = intel_enable_rc6(dev_priv->dev);
+ rc6_mode = intel_enable_rc6();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
/* We don't use those on Haswell */
- if (!IS_HASWELL(dev)) {
+ if (!IS_HASWELL(dev_priv)) {
if (rc6_mode & INTEL_RC6p_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
@@ -5063,7 +5338,7 @@ static void gen6_enable_rps(struct drm_device *dev)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
- intel_print_rc6_info(dev, rc6_mask);
+ intel_print_rc6_info(dev_priv, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -5087,13 +5362,13 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev) && ret) {
+ if (IS_GEN6(dev_priv) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -5106,9 +5381,8 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void __gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
@@ -5137,7 +5411,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5155,16 +5429,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_INFO(dev)->gen >= 8) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
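A worked pass through the branches above (illustrative numbers): if min_ring_freq reads back as 6, the DDR conversion yields mult_frac(6, 8, 3) = 16, i.e. 1600 MHz in the 100 MHz units used for ring_freq. Then for gpu_freq = 16 (16 * 50 MHz = 800 MHz GT), SKL/KBL program ring_freq = 16 (1600 MHz, i.e. 2 * GT), gen8 programs max(16, 16) = 16, and HSW computes mult_frac(16, 5, 4) = 20 before applying the same DDR floor.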
@@ -5191,26 +5465,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
}
}
-void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_CORE_RING_FREQ(dev))
+ if (!HAS_CORE_RING_FREQ(dev_priv))
return;
mutex_lock(&dev_priv->rps.hw_lock);
- __gen6_update_ring_freq(dev);
+ __gen6_update_ring_freq(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev)->eu_total) {
+ switch (INTEL_INFO(dev_priv)->eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5321,9 +5592,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
-static void cherryview_setup_pctx(struct drm_device *dev)
+static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
u32 pcbr;
@@ -5342,15 +5612,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
-static void valleyview_setup_pctx(struct drm_device *dev)
+static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->dev->struct_mutex);
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@@ -5375,7 +5644,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
- pctx = i915_gem_object_create_stolen(dev, pctx_size);
+ pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
@@ -5387,13 +5656,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (WARN_ON(!dev_priv->vlv_pctx))
return;
@@ -5412,12 +5679,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
dev_priv->rps.gpll_ref_freq);
}
-static void valleyview_init_gt_powersave(struct drm_device *dev)
+static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- valleyview_setup_pctx(dev);
+ valleyview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5471,12 +5737,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- cherryview_setup_pctx(dev);
+ cherryview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5536,14 +5801,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- valleyview_cleanup_pctx(dev);
+ valleyview_cleanup_pctx(dev_priv);
}
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
@@ -5588,8 +5852,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
- (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5634,14 +5898,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void valleyview_enable_rps(struct drm_device *dev)
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
@@ -5694,10 +5957,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
- intel_print_rc6_info(dev, rc6_mode);
+ intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5724,7 +5987,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5814,10 +6077,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -5857,11 +6119,10 @@ static int _pxvid_to_vd(u8 pxvid)
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- struct drm_device *dev = dev_priv->dev;
const int vd = _pxvid_to_vd(pxvid);
const int vm = vd - 1125;
- if (INTEL_INFO(dev)->is_mobile)
+ if (INTEL_INFO(dev_priv)->is_mobile)
return vm > 0 ? vm : 0;
return vd;
@@ -5902,9 +6163,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return;
spin_lock_irq(&mchdev_lock);
@@ -5953,10 +6212,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -6097,7 +6355,7 @@ bool i915_gpu_turbo_disable(void)
dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+ if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;
out_unlock:
@@ -6145,9 +6403,8 @@ void intel_gpu_ips_teardown(void)
spin_unlock_irq(&mchdev_lock);
}
-static void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
@@ -6216,10 +6473,8 @@ static void intel_init_emon(struct drm_device *dev)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_gt_powersave(struct drm_device *dev)
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
@@ -6229,74 +6484,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
}
- if (IS_CHERRYVIEW(dev))
- cherryview_init_gt_powersave(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_init_gt_powersave(dev);
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_init_gt_powersave(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_init_gt_powersave(dev_priv);
}
-void intel_cleanup_gt_powersave(struct drm_device *dev)
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return;
- else if (IS_VALLEYVIEW(dev))
- valleyview_cleanup_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_cleanup_gt_powersave(dev_priv);
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev_priv);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev: drm device
+ * @dev_priv: i915 device
*
* We don't want to disable RC6 or other features here, we just want
* to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
-void intel_suspend_gt_powersave(struct drm_device *dev)
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_device *dev)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_disable_drps(dev);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- intel_suspend_gt_powersave(dev);
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_disable_drps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
+ intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev)->gen >= 9) {
- gen9_disable_rc6(dev);
- gen9_disable_rps(dev);
- } else if (IS_CHERRYVIEW(dev))
- cherryview_disable_rps(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_disable_rps(dev);
+ if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_disable_rc6(dev_priv);
+ gen9_disable_rps(dev_priv);
+ } else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rps(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rps(dev_priv);
else
- gen6_disable_rps(dev);
+ gen6_disable_rps(dev_priv);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6308,27 +6555,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
- struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
- cherryview_enable_rps(dev);
- } else if (IS_VALLEYVIEW(dev)) {
- valleyview_enable_rps(dev);
- } else if (INTEL_INFO(dev)->gen >= 9) {
- gen9_enable_rc6(dev);
- gen9_enable_rps(dev);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- __gen6_update_ring_freq(dev);
- } else if (IS_BROADWELL(dev)) {
- gen8_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_enable_rps(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ valleyview_enable_rps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_enable_rc6(dev_priv);
+ gen9_enable_rps(dev_priv);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ __gen6_update_ring_freq(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ gen8_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
} else {
- gen6_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ gen6_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
}
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6339,27 +6585,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
dev_priv->rps.enabled = true;
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_enable_gt_powersave(struct drm_device *dev)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
return;
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- mutex_lock(&dev->struct_mutex);
- intel_init_emon(dev);
- mutex_unlock(&dev->struct_mutex);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_enable_drps(dev_priv);
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ intel_init_emon(dev_priv);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -6378,14 +6622,12 @@ void intel_enable_gt_powersave(struct drm_device *dev)
}
}
-void intel_reset_gt_powersave(struct drm_device *dev)
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
dev_priv->rps.enabled = false;
}
@@ -6698,11 +6940,42 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+ int general_prio_credits,
+ int high_prio_credits)
+{
+ u32 misccpctl;
+
+ /* WaTempDisableDOPClkGating:bdw */
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ I915_WRITE(GEN8_L3SQCREG1,
+ L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
+ L3_HIGH_PRIO_CREDITS(high_prio_credits));
+
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating.
+ * See the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
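For reference, the two call sites added further down in this diff, with the credit values they pass (copied verbatim from those hunks):

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaProgramL3SqcReg1Default:chv */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);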
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- uint32_t misccpctl;
ilk_init_lp_watermarks(dev);
@@ -6733,20 +7006,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
- /*
- * WaProgramL3SqcReg1Default:bdw
- * WaTempDisableDOPClkGating:bdw
- */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
- /*
- * Wait at least 100 clocks before re-enabling clock gating. See
- * the definition of L3SQCREG1 in BSpec.
- */
- POSTING_READ(GEN8_L3SQCREG1);
- udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ /* WaProgramL3SqcReg1Default:bdw */
+ gen8_set_l3sqc_credits(dev_priv, 30, 2);
/*
* WaGttCachingOffByDefault:bdw
@@ -7017,6 +7278,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
+ * WaProgramL3SqcReg1Default:chv
+ * See gfxspecs/Related Documents/Performance Guide/
+ * LSQC Setting Recommendations.
+ */
+ gen8_set_l3sqc_credits(dev_priv, 38, 2);
+
+ /*
* GTT cache may not work with big pages, so if those
* are ever enabled GTT cache may need to be disabled.
*/
@@ -7163,9 +7431,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
@@ -7217,6 +7485,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@@ -7390,19 +7659,17 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->engine->dev), NULL,
- req->emitted_jiffies);
+ gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
- i915_gem_request_unreference__unlocked(req);
+ i915_gem_request_unreference(req);
kfree(boost);
}
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
struct request_boost *boost;
- if (req == NULL || INTEL_INFO(dev)->gen < 6)
+ if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
if (i915_gem_request_completed(req, true))
@@ -7416,7 +7683,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
boost->req = req;
INIT_WORK(&boost->work, __intel_rps_boost_work);
- queue_work(to_i915(dev)->wq, &boost->work);
+ queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..29a09bf6bd18 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
i915_reg_t aux_ctl_reg;
- int precharge = 0x3;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[4] = DP_SET_POWER_D0,
};
enum port port = dig_port->port;
+ u32 aux_ctl;
int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
+ if (dev_priv->psr.link_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+
aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
/* Setup AUX registers */
@@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- if (INTEL_INFO(dev)->gen >= 9) {
- uint32_t val;
-
- val = I915_READ(aux_ctl_reg);
- val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
- val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
- val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
- val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
- /* Use hardcoded data values for PSR, frame sync and GTC */
- val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
- I915_WRITE(aux_ctl_reg, val);
- } else {
- I915_WRITE(aux_ctl_reg,
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
- }
-
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_clock_divider);
+ I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* It was recently identified that, depending on the panel, the idle
+ * frame count calculated by HW can be off by one. So let's use the
+ * value that came from VBT + 1.
+ * There are also cases where the panel demands at least 4 idle frames
+ * but VBT is not set; to cover both cases, let's use at least 5 when
+ * VBT isn't set, to be on the safe side.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
uint32_t val = EDP_PSR_ENABLE;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
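Concretely: a VBT idle_frames of 4 now programs 4 + 1 = 5, absorbing the possible off-by-one in the HW-calculated count. (The "at least 5 when VBT isn't set" case the new comment mentions is not visible in this hunk.)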
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..8d35a3978f9b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
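The bare subtraction only covers the case where head is ahead of tail in the circular buffer; a sketch of the usual wrap-around handling (hedged: the remainder of the body lies outside this hunk, and I915_RING_FREE_SPACE names the small gap kept so that head == tail unambiguously means "empty"):

	int space = head - tail;
	if (space <= 0)		/* head has wrapped behind tail */
		space += size;	/* free space spans the wrap point */
	return space - I915_RING_FREE_SPACE;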
@@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
bool intel_engine_stopped(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}
@@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 flush_domains)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
@@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
cmd |= MI_EXE_FLUSH;
if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
+ (IS_G4X(req->i915) || IS_GEN5(req->i915)))
cmd |= MI_INVALIDATE_ISP;
ret = intel_ring_begin(req, 2);
@@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_TAIL(engine, value);
}
u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_INFO(engine->dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
@@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(engine->dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush */
@@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0);
engine->write_tail(engine, 0);
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
static int init_ring_common(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
@@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
}
}
- if (I915_NEED_GFX_HWS(dev))
+ if (I915_NEED_GFX_HWS(dev_priv))
intel_ring_setup_status_page(engine);
else
ring_setup_phys_status_page(engine);
@@ -644,12 +646,10 @@ out:
void
intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
-
if (engine->scratch.obj == NULL)
return;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(engine->i915) >= 5) {
kunmap(sg_page(engine->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(engine->scratch.obj);
}
@@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
WARN_ON(engine->scratch.obj);
- engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
- if (engine->scratch.obj == NULL) {
+ engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
+ if (IS_ERR(engine->scratch.obj)) {
DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(engine->scratch.obj);
+ engine->scratch.obj = NULL;
goto err;
}
@@ -702,11 +703,9 @@ err:
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
+ int ret, i;
if (w->count == 0)
return 0;
@@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
@@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -911,8 +907,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t tmp;
int ret;
@@ -935,14 +930,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -968,20 +963,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) ||
+ IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -1007,8 +1002,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -1049,9 +1043,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1062,12 +1055,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1076,24 +1069,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
/* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@@ -1109,13 +1102,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
}
/* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1130,9 +1123,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1140,11 +1132,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
@@ -1154,7 +1146,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
STALL_DOP_GATING_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1164,7 +1156,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
@@ -1174,29 +1166,33 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return ret;
}
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
return 0;
}
int init_workarounds_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
return bdw_init_workarounds(engine);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return chv_init_workarounds(engine);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
return skl_init_workarounds(engine);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return bxt_init_workarounds(engine);
return 0;
@@ -1204,14 +1200,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+ if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -1220,22 +1215,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
@@ -1245,19 +1240,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (HAS_L3_DPF(dev_priv))
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
return init_workarounds_ring(engine);
}
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (dev_priv->semaphore_obj) {
i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1273,13 +1267,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 8
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1297,7 +1290,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_FLUSH_ENABLE);
+ PIPE_CONTROL_CS_STALL);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, seqno);
@@ -1315,13 +1308,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 6
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1354,14 +1346,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
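For context on the sizing above: each extra ring costs MBOX_UPDATE_DWORDS (3) dwords, and the total is rounded up to an even count, presumably to keep the emission qword-aligned. A worked example, with hypothetical numbers:

	/* e.g. signalling from one ring on a 4-engine part: */
	num_rings = 4;
	num_dwords += round_up((num_rings - 1) * 3, 2);	/* 9 rounds up to 10 dwords */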
@@ -1420,10 +1411,38 @@ gen6_add_request(struct drm_i915_gem_request *req)
return 0;
}
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->engine;
+ int ret;
+
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 8);
+ else
+ ret = intel_ring_begin(req, 8);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ /* We're thrashing one dword of HWS. */
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ intel_ring_emit(engine, MI_NOOP);
+ __intel_ring_advance(engine);
+
+ return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->last_seqno < seqno;
}
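The new gen8_render_add_request() above emits exactly the eight dwords it reserves; a sketch of the accounting (an illustration, not part of the patch):

	/* GFX_OP_PIPE_CONTROL(6): header, flags, address (lo, hi), seqno, and a
	 * scratch dword = 6, plus MI_USER_INTERRUPT and MI_NOOP = 8, matching
	 * intel_ring_begin(req, 8). The QW_WRITE stores 8 bytes but the seqno
	 * needs only 4, so the neighbouring HWS dword is overwritten - the
	 * "thrashing" noted in the comment above.
	 */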
@@ -1441,7 +1460,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->engine;
- struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ struct drm_i915_private *dev_priv = waiter_req->i915;
+ struct i915_hw_ppgtt *ppgtt;
int ret;
ret = intel_ring_begin(waiter_req, 4);
@@ -1450,7 +1470,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter,
@@ -1458,6 +1477,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
intel_ring_emit(waiter,
upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
intel_ring_advance(waiter);
+
+ /* When the !RCS engines idle waiting upon a semaphore, they lose their
+ * pagetables and we must reload them before executing the batch.
+ * We do this on the i915_switch_context() following the wait and
+ * before the dispatch.
+ */
+ ppgtt = waiter_req->ctx->ppgtt;
+ if (ppgtt && waiter_req->engine->id != RCS)
+ ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
return 0;
}
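The comment above implies a consumer on the context-switch path; a hypothetical sketch of that side, using names from this era's i915 (assumed, not shown in this patch):

	/* roughly what i915_switch_context() is expected to do per engine: */
	if (ppgtt && (ppgtt->pd_dirty_rings & intel_engine_flag(engine))) {
		ret = ppgtt->switch_mm(ppgtt, req);	/* re-emit the page-directory load */
		if (ret == 0)
			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
	}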
@@ -1486,7 +1514,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
return ret;
/* If seqno wrap happened, omit the wait with no-ops */
- if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
@@ -1567,7 +1595,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -1616,8 +1644,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
static bool
gen5_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1634,8 +1661,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
static void
gen5_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1647,8 +1673,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
static bool
i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (!intel_irqs_enabled(dev_priv))
@@ -1668,8 +1693,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
static void
i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1684,8 +1708,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
static bool
i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (!intel_irqs_enabled(dev_priv))
@@ -1705,8 +1728,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
static void
i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1759,8 +1781,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
static bool
gen6_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1768,10 +1789,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
- GT_PARITY_ERROR(dev)));
+ GT_PARITY_ERROR(dev_priv)));
else
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1784,14 +1805,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
static void
gen6_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
else
I915_WRITE_IMR(engine, ~0);
gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1802,8 +1822,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
static bool
hsw_vebox_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1822,8 +1841,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
static void
hsw_vebox_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1837,8 +1855,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
static bool
gen8_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1846,7 +1863,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1863,13 +1880,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
static void
gen8_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
I915_WRITE_IMR(engine,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
} else {
@@ -1991,12 +2007,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
engine->status_page.page_addr = NULL;
}
@@ -2022,10 +2038,10 @@ static int init_status_page(struct intel_engine_cs *engine)
unsigned flags;
int ret;
- obj = i915_gem_alloc_object(engine->dev, 4096);
- if (obj == NULL) {
+ obj = i915_gem_object_create(engine->i915->dev, 4096);
+ if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ return PTR_ERR(obj);
}
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2033,7 +2049,7 @@ static int init_status_page(struct intel_engine_cs *engine)
goto err_unref;
flags = 0;
- if (!HAS_LLC(engine->dev))
+ if (!HAS_LLC(engine->i915))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
@@ -2067,11 +2083,11 @@ err_unref:
static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
@@ -2084,20 +2100,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
+ GEM_BUG_ON(ringbuf->vma == NULL);
+ GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
i915_gem_object_unpin_map(ringbuf->obj);
else
- iounmap(ringbuf->virtual_start);
+ i915_vma_unpin_iomap(ringbuf->vma);
ringbuf->virtual_start = NULL;
- ringbuf->vma = NULL;
+
i915_gem_object_ggtt_unpin(ringbuf->obj);
+ ringbuf->vma = NULL;
}
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2149,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- addr = ioremap_wc(ggtt->mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (addr == NULL) {
- ret = -ENOMEM;
+ addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto err_unpin;
}
}
@@ -2163,9 +2180,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, ringbuf->size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, ringbuf->size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
@@ -2197,13 +2214,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->dev) || IS_845G(engine->dev))
+ if (IS_I830(engine->i915) || IS_845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
intel_ring_update_space(ring);
- ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+ ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
@@ -2226,12 +2243,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(engine->buffer);
- engine->dev = dev;
+ engine->i915 = dev_priv;
INIT_LIST_HEAD(&engine->active_list);
INIT_LIST_HEAD(&engine->request_list);
INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2249,7 +2267,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
}
engine->buffer = ringbuf;
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
ret = init_status_page(engine);
if (ret)
goto error;
@@ -2260,7 +2278,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
engine->name, ret);
@@ -2286,11 +2304,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(engine->dev);
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_stop_engine(engine);
- WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(engine->buffer);
intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2318,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(engine->dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
cleanup_status_page(engine);
} else {
WARN_ON(engine->id != RCS);
@@ -2309,7 +2327,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2332,46 +2350,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- request->ringbuf = request->engine->buffer;
- return 0;
-}
+ int ret;
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
*/
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+ request->reserved_space += LEGACY_REQUEST_SIZE;
- return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
- ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ request->ringbuf = request->engine->buffer;
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ return ret;
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
+ request->reserved_space -= LEGACY_REQUEST_SIZE;
+ return 0;
}
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
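Read together with intel_ring_begin() below, the reserved_space scheme has a three-step lifecycle; a sketch, assuming the request core sets the base reservation at allocation and drops it in i915_add_request() (neither is shown in this hunk):

	/* alloc:       reserved_space = MIN_SPACE_FOR_ADD_REQUEST (assumed), then the
	 *              temporary += LEGACY_REQUEST_SIZE above pre-flushes extra room;
	 * build:       every intel_ring_begin(req, n) waits for n*4 + reserved_space bytes;
	 * add_request: reserved_space = 0 (assumed), then the breadcrumb is emitted
	 *              into the space that was held back all along.
	 */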
@@ -2393,7 +2387,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
*
* See also i915_gem_request_alloc() and i915_add_request().
*/
- GEM_BUG_ON(!ringbuf->reserved_size);
+ GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &engine->request_list, list) {
unsigned space;
@@ -2428,7 +2422,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int total_bytes, wait_bytes;
bool need_wrap = false;
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + req->reserved_space;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2444,7 +2438,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
- wait_bytes = remain_actual + ringbuf->reserved_size;
+ wait_bytes = remain_actual + req->reserved_space;
} else {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -2501,7 +2495,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
@@ -2511,7 +2505,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
* the semaphore value, then when the seqno moves backwards all
* future waits will complete instantly (causing rendering corruption).
*/
- if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
if (HAS_VEBOX(dev_priv))
@@ -2537,7 +2531,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Every tail move must follow the sequence below */
@@ -2579,7 +2573,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2601,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2692,7 +2686,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
@@ -2701,7 +2694,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2722,7 +2715,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2747,10 +2740,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (i915_semaphore_is_enabled(dev)) {
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
+ if (INTEL_GEN(dev_priv) >= 8) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
+ obj = i915_gem_object_create(dev, 4096);
+ if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
i915.semaphores = 0;
} else {
@@ -2766,25 +2759,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen6_add_request;
+ engine->add_request = gen8_render_add_request;
engine->flush = gen8_render_ring_flush;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
WARN_ON(!dev_priv->semaphore_obj);
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_rcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
}
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->add_request = gen6_add_request;
engine->flush = gen7_render_ring_flush;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
engine->flush = gen6_render_ring_flush;
engine->irq_get = gen6_ring_get_irq;
engine->irq_put = gen6_ring_put_irq;
@@ -2792,7 +2784,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
/*
@@ -2813,7 +2805,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
engine->add_request = pc_render_add_request;
engine->flush = gen4_render_ring_flush;
engine->get_seqno = pc_render_get_seqno;
@@ -2824,13 +2816,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
engine->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
engine->flush = gen2_render_ring_flush;
else
engine->flush = gen4_render_ring_flush;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
engine->irq_get = i8xx_ring_get_irq;
engine->irq_put = i8xx_ring_put_irq;
} else {
@@ -2841,15 +2833,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
engine->write_tail = ring_write_tail;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
engine->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
+ else if (IS_I830(dev_priv) || IS_845G(dev_priv))
engine->dispatch_execbuffer = i830_dispatch_execbuffer;
else
engine->dispatch_execbuffer = i915_dispatch_execbuffer;
@@ -2857,11 +2849,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
- if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
- if (obj == NULL) {
+ if (HAS_BROKEN_CS_TLB(dev_priv)) {
+ obj = i915_gem_object_create(dev, I830_WA_SIZE);
+ if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate batch bo\n");
- return -ENOMEM;
+ return PTR_ERR(obj);
}
ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
@@ -2879,7 +2871,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (ret)
return ret;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
ret = intel_init_pipe_control(engine);
if (ret)
return ret;
@@ -2899,24 +2891,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->hw_id = 1;
engine->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
engine->write_tail = gen6_bsd_ring_write_tail;
engine->flush = gen6_bsd_ring_flush;
engine->add_request = gen6_add_request;
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
@@ -2927,7 +2919,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->irq_put = gen6_ring_put_irq;
engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -2948,7 +2940,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->add_request = i9xx_add_request;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
engine->irq_get = gen5_ring_get_irq;
engine->irq_put = gen5_ring_put_irq;
@@ -2990,7 +2982,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3017,13 +3009,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3033,7 +3025,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
engine->irq_get = gen6_ring_get_irq;
engine->irq_put = gen6_ring_put_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.signal = gen6_signal;
engine->semaphore.sync_to = gen6_ring_sync;
/*
@@ -3078,13 +3070,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
engine->get_seqno = ring_get_seqno;
engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
engine->irq_get = gen8_ring_get_irq;
engine->irq_put = gen8_ring_put_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
engine->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3094,7 +3086,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
engine->irq_get = hsw_vebox_get_irq;
engine->irq_put = hsw_vebox_put_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
+ if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
engine->semaphore.signal = gen6_signal;
engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..b33c876fed20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
- int reserved_size;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -120,7 +119,7 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
-struct intel_context;
+struct i915_gem_context;
struct drm_i915_reg_table;
/*
@@ -142,7 +141,8 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_object *obj;
};
-struct  intel_engine_cs {
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
const char *name;
enum intel_engine_id {
RCS = 0,
@@ -157,7 +157,6 @@ struct intel_engine_cs {
unsigned int hw_id;
unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
- struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head buffers;
@@ -268,7 +267,6 @@ struct intel_engine_cs {
struct tasklet_struct irq_tasklet;
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
- struct list_head execlist_retired_req_list;
unsigned int fw_domains;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
@@ -312,7 +310,7 @@ struct intel_engine_cs {
wait_queue_head_t irq_queue;
- struct intel_context *last_context;
+ struct i915_gem_context *last_context;
struct intel_ring_hangcheck hangcheck;
@@ -352,7 +350,7 @@ struct intel_engine_cs {
static inline bool
intel_engine_initialized(struct intel_engine_cs *engine)
{
- return engine->dev != NULL;
+ return engine->i915 != NULL;
}
static inline unsigned
@@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
*/
-#define MIN_SPACE_FOR_ADD_REQUEST 160
+#define MIN_SPACE_FOR_ADD_REQUEST 336
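The new figure follows directly from the dword counts in the comment:

	/* BDW emission:          6 + 6 + 36 = 48 dwords = 192 bytes
	 * doubling the largest
	 * packet for tail wrap:  6 + 6 + 72 = 84 dwords = 336 bytes
	 */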
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..fe8faf30bda7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -806,15 +806,27 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 tmp = I915_READ(DBUF_CTL);
+
+ WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+ (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+ "Unexpected DBuf power power state (0x%08x)\n", tmp);
+}
+
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- if (IS_BROXTON(dev_priv)) {
- broxton_cdclk_verify_state(dev_priv);
+ WARN_ON(dev_priv->cdclk_freq !=
+ dev_priv->display.get_display_clock_speed(dev_priv->dev));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_BROXTON(dev_priv))
broxton_ddi_phy_verify_state(dev_priv);
- }
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,6 +960,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
+
+ WARN_ON(dev_priv->rawclk_freq == 0);
+
+ I915_WRITE(RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -2171,6 +2188,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+ DRM_ERROR("DBuf power disable timeout!\n");
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -2195,12 +2234,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- if (!resume)
- return;
-
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload)
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
@@ -2211,6 +2249,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_dbuf_disable(dev_priv);
+
skl_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -2255,9 +2295,11 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
broxton_init_cdclk(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
broxton_ddi_phy_init(dev_priv);
- broxton_cdclk_verify_state(dev_priv);
broxton_ddi_phy_verify_state(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
@@ -2272,6 +2314,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
broxton_ddi_phy_uninit(dev_priv);
+
+ gen9_dbuf_disable(dev_priv);
+
broxton_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..1a71456bd12a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2981,7 +2981,7 @@ bool intel_sdvo_init(struct drm_device *dev,
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
- NULL);
+ "SDVO %c", port_name(port));
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..324ccb06397d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
- enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
- crtc->debug.start_vbl_count =
- dev->driver->get_vblank_counter(dev, pipe);
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
}
@@ -154,14 +151,19 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
{
- struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
- u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
+ if (work) {
+ work->flip_queued_vblank = end_vbl_count;
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
+ }
+
trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
local_irq_enable();
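The smp_mb__before_atomic() above orders the flip_queued_vblank store before the pending flag is set; a hypothetical reader sketch showing the pairing (names assumed, not from this patch):

	/* reader side, e.g. from the vblank interrupt: */
	if (atomic_read(&work->pending)) {
		smp_rmb();	/* pairs with smp_mb__before_atomic() on the writer */
		vbl = work->flip_queued_vblank;	/* sees the value stored before pending */
	}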
@@ -203,8 +205,6 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +260,16 @@ skl_update_plane(struct drm_plane *drm_plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
- uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
- I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+
+ scaler = &crtc_state->scaler_state.scalers[scaler_id];
+
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -1111,10 +1114,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
- plane_formats, num_plane_formats,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "plane %d%c", plane + 2, pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..1f3a0e1e1a1f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1591,7 +1591,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_SVIDEO);
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..c1ca458d688e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
dev_priv->uncore.fifo_count =
fifo_free_entries(dev_priv);
}
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
return false;
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
+static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
__raw_i915_write32(dev_priv, GTFIFOCTL,
__raw_i915_read32(dev_priv, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
GT_FIFO_CTL_RC6_POLICY_STALL);
}
- intel_uncore_forcewake_reset(dev, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+ bool restore_forcewake)
{
- __intel_uncore_early_sanitize(dev, restore_forcewake);
- i915_check_and_clear_faults(dev);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ i915_check_and_clear_faults(dev_priv);
}
-void intel_uncore_sanitize(struct drm_device *dev)
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+ i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_disable_gt_powersave(dev);
+ intel_disable_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
fw_domain_reset(d);
}
-static void intel_uncore_fw_domains_init(struct drm_device *dev)
+static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (INTEL_INFO(dev_priv)->gen <= 5)
return;
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- if (!IS_CHERRYVIEW(dev))
+ if (!IS_CHERRYVIEW(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- mutex_lock(&dev->struct_mutex);
fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_check_vgpu(dev);
+ i915_check_vgpu(dev_priv);
intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev);
- __intel_uncore_early_sanitize(dev, false);
+ intel_uncore_fw_domains_init(dev_priv);
+ __intel_uncore_early_sanitize(dev_priv, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
break;
case 8:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);
@@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
break;
case 7:
case 6:
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
-void intel_uncore_fini(struct drm_device *dev)
+void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev);
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_sanitize(dev_priv);
+ intel_uncore_forcewake_reset(dev_priv, false);
}
-#define GEN_RANGE(l, h) GENMASK(h, l)
+#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
static const struct register_whitelist {
i915_reg_t offset_ldw, offset_udw;
@@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
break;
}
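The GEN_RANGE() redefinition above shifts the mask down by one bit so it lines up with gen_mask, which the new whitelist test implies is BIT(gen - 1); a worked example:

	/* GEN_RANGE(4, 8) == GENMASK(7, 3) == 0xf8;
	 * a gen6 device has gen_mask == BIT(5) == 0x20;
	 * 0x20 & 0xf8 != 0, so the register remains whitelisted on gen6.
	 */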
@@ -1467,83 +1460,47 @@ out:
return ret;
}
-int i915_get_reset_stats_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reset_stats *args = data;
- struct i915_ctx_hang_stats *hs;
- struct intel_context *ctx;
- int ret;
-
- if (args->flags || args->pad)
- return -EINVAL;
-
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
- hs = &ctx->hang_stats;
-
- if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
- else
- args->reset_count = 0;
-
- args->batch_active = hs->batch_active;
- args->batch_pending = hs->batch_pending;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+
/* assert reset for at least 20 usec */
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(20);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
- return wait_for(i915_reset_complete(dev), 500);
+ return wait_for(i915_reset_complete(pdev), 500);
}
-static int g4x_reset_complete(struct drm_device *dev)
+static int g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for(g4x_reset_complete(dev), 500);
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(pdev), 500);
}
-static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
int ret;
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
I915_WRITE(ILK_GDSR,
@@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
/**
* gen6_reset_engines - reset individual engines
- * @dev: DRM device
+ * @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
*
* This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
*
* Returns 0 on success, nonzero on error.
*/
-static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen6_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN6_GRDOM_RENDER,
@@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
ret = gen6_hw_domain_reset(dev_priv, hw_mask);
- intel_uncore_forcewake_reset(dev, true);
+ intel_uncore_forcewake_reset(dev_priv, true);
return ret;
}
@@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen8_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, dev_priv, engine_mask)
if (gen8_request_engine_reset(engine))
goto not_ready;
- return gen6_reset_engines(dev, engine_mask);
+ return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1663,35 @@ not_ready:
return -EIO;
}
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
- unsigned engine_mask)
+typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
if (!i915.reset)
return NULL;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_INFO(dev_priv)->gen >= 8)
return gen8_reset_engines;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_INFO(dev_priv)->gen >= 6)
return gen6_reset_engines;
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
return ironlake_do_reset;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
return g4x_do_reset;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
return g33_do_reset;
- else if (INTEL_INFO(dev)->gen >= 3)
+ else if (INTEL_INFO(dev_priv)->gen >= 3)
return i915_do_reset;
else
return NULL;
}
-int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int (*reset)(struct drm_device *, unsigned);
+ reset_func reset;
int ret;
- reset = intel_get_gpu_reset(dev);
+ reset = intel_get_gpu_reset(dev_priv);
if (reset == NULL)
return -ENODEV;
@@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev, engine_mask);
+ ret = reset(dev_priv, engine_mask);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-bool intel_has_gpu_reset(struct drm_device *dev)
+bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
- return intel_get_gpu_reset(dev) != NULL;
+ return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
int ret;
unsigned long irqflags;
- if (!i915.enable_guc_submission)
+ if (!HAS_GUC(dev_priv))
return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
@@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;
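
/*
 * A minimal sketch (not from the patch): the reset_func typedef above
 * exists purely so intel_get_gpu_reset() has a readable return type
 * instead of the old inline function-pointer declarator. Standalone C
 * model with made-up names (fake_dev, genN_reset):
 */
#include <stdio.h>

struct fake_dev { int gen; };

typedef int (*reset_func)(struct fake_dev *dev, unsigned engine_mask);

static int gen8_reset(struct fake_dev *dev, unsigned mask)
{
	printf("gen8-style reset, mask %#x\n", mask);
	return 0;
}

static int gen6_reset(struct fake_dev *dev, unsigned mask)
{
	printf("gen6-style reset, mask %#x\n", mask);
	return 0;
}

static reset_func get_reset(struct fake_dev *dev)
{
	if (dev->gen >= 8)
		return gen8_reset;
	if (dev->gen >= 6)
		return gen6_reset;
	return NULL;			/* no reset support */
}

int main(void)
{
	struct fake_dev dev = { .gen = 8 };
	reset_func reset = get_reset(&dev);

	return reset ? reset(&dev, ~0u) : 1;
}
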
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index c15051de8023..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
u8 vsync_off:4;
u8 rsvd0:6;
u8 hsync_off_hi:2;
- u8 h_image;
- u8 v_image;
- u8 max_hv;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
u8 h_border;
u8 v_border;
u8 rsvd1:3;
@@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
u8 obsolete3;
} __packed;
+struct bdb_lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
+ struct bdb_lfp_backlight_control_method backlight_control[16];
} __packed;
struct aimdb_header {
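
/*
 * A hedged sketch of how the split fields above would be consumed: the
 * old single-byte h_image/v_image become 12-bit values, low byte plus a
 * high nibble. The struct and helpers here are illustrative stand-ins,
 * not the VBT parser itself, and the hi<<8 reassembly is an assumption.
 */
#include <stdint.h>

struct dvo_image_size {
	uint8_t himage_lo;
	uint8_t vimage_lo;
	uint8_t vimage_hi:4;
	uint8_t himage_hi:4;
};

static inline uint16_t dvo_h_image(const struct dvo_image_size *s)
{
	return ((uint16_t)s->himage_hi << 8) | s->himage_lo;
}

static inline uint16_t dvo_v_image(const struct dvo_image_size *s)
{
	return ((uint16_t)s->vimage_hi << 8) | s->vimage_lo;
}
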
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d0ab..06a417b2f91e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
- .gem_free_object = mtk_drm_gem_free_object,
+ .gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.dumb_map_offset = mtk_drm_gem_dumb_map_offset,
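
/*
 * The same one-line conversion repeats across many drivers below.
 * Renaming the hook to .gem_free_object_unlocked tells the DRM core
 * that the driver frees GEM objects without relying on
 * dev->struct_mutex being held. Hedged kernel-style sketch of the hook
 * (the example_* names are made up):
 */
#include <drm/drmP.h>

static void example_gem_free(struct drm_gem_object *obj)
{
	/* release backing storage; struct_mutex is not held here */
}

static struct drm_driver example_driver = {
	.driver_features	  = DRIVER_GEM,
	.gem_free_object_unlocked = example_gem_free,
};
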
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7200..2b4b125eebc3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = mgag200_gem_free_object,
+ .gem_free_object_unlocked = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d347dca17267..6b21cb27e1cc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
mga_crtc->lut_r[i] = red[i] >> 8;
mga_crtc->lut_g[i] = green[i] >> 8;
mga_crtc->lut_b[i] = blue[i] >> 8;
}
mga_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
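
/*
 * The new .gamma_set contract, as used in the conversion above and in
 * the nouveau/radeon/vc4 hunks below: the core now always passes a full
 * LUT starting at index 0 (no start offset), and the hook returns 0 or
 * a negative errno. A hedged, do-nothing kernel-style example:
 */
#include <drm/drm_crtc.h>

static int example_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
			     uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		/* latch r[i]/g[i]/b[i] into the hardware LUT here */
	}
	return 0;		/* negative errno on failure */
}
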
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..f145d256e332 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
mdp4_enable(mdp4_kms);
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
- }
}
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
- }
mdp4_disable(mdp4_kms);
}
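
/*
 * Shape of the conversion above, sketched: for_each_crtc_in_state()
 * visits only the CRTCs actually carried in the atomic update, so the
 * old num_crtc index scan and its NULL checks disappear. Hedged
 * kernel-context example:
 */
#include <drm/drmP.h>
#include <drm/drm_atomic.h>

static void example_grab_vblanks(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);	/* balanced by _put on completion */
}
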
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..4e8ed739f558 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
+ const struct drm_plane_state *pstate;
int cnt = 0, i;
DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *pstate;
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (cnt >= (hw_cfg->lm.nb_stages)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
- pstate = state->state->plane_states[drm_plane_index(plane)];
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!pstate)
- pstate = plane->state;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
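
/*
 * Hedged sketch of the iterator used above:
 * drm_atomic_crtc_state_for_each_plane_state() hands back the plane's
 * new state when it is part of the update and its current state
 * otherwise, which is why the manual "plane might not have changed"
 * fallback could be deleted. The yielded state is const.
 */
#include <drm/drm_atomic.h>

static unsigned int example_count_planes(struct drm_crtc_state *crtc_state)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	unsigned int cnt = 0;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state)
		cnt++;

	return cnt;
}
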
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..f0c285b1c027 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -78,17 +78,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
- }
mdp5_disable(mdp5_kms);
}
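
/*
 * The plane-side counterpart of the CRTC iterator, sketched:
 * for_each_plane_in_state() walks only the planes carried in the
 * update, again replacing a num_total_plane scan plus NULL checks.
 */
#include <drm/drm_atomic.h>

static void example_walk_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i;

	for_each_plane_in_state(state, plane, plane_state, i) {
		/* act on plane/plane_state; both are non-NULL here */
	}
}
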
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..8c3b463620bd 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
int i;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
if (!crtc->state->enable)
continue;
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct msm_drm_private *priv = dev->dev_private;
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
struct msm_commit *c;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
/*
* Figure out what crtcs we have:
*/
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
- c->crtc_mask |= (1 << drm_crtc_index(crtc));
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ c->crtc_mask |= drm_crtc_mask(crtc);
/*
* Figure out what fence to wait for:
*/
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
- if ((plane->state->fb != new_state->fb) && new_state->fb) {
- struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+ struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
}
}
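
/*
 * drm_crtc_mask(), used above to build c->crtc_mask, is only a
 * readability helper over the open-coded shift it replaces;
 * conceptually it is:
 */
#include <drm/drm_crtc.h>

static inline uint32_t example_crtc_mask(struct drm_crtc *crtc)
{
	return 1 << drm_crtc_index(crtc);	/* one bit per CRTC index */
}
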
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..7919c24c6ddd 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -56,17 +56,9 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(msm_fb);
}
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = msm_framebuffer_create_handle,
.destroy = msm_framebuffer_destroy,
- .dirty = msm_framebuffer_dirty,
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf3482e..1a061e3e8b9e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -184,21 +184,7 @@ fail:
return ret;
}
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
- u16 red, u16 green, u16 blue, int regno)
-{
- DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, int regno)
-{
- DBG("fbdev: get gamma");
-}
-
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
- .gamma_set = msm_crtc_fb_gamma_set,
- .gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54da33..0cb7a18cde26 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
}
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
- int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
*/
if (!nv_crtc->base.primary->fb) {
nv_crtc->lut.depth = 0;
- return;
+ return 0;
}
nv_crtc_gamma_load(crtc);
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 11f8dd9c0edb..c00ff6e786a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "drmP.h"
@@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* remove conflicting drivers (vesafb, efifb etc) */
@@ -970,7 +962,7 @@ driver_stub = {
.gem_prime_vmap = nouveau_gem_prime_vmap,
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
- .gem_free_object = nouveau_gem_object_del,
+ .gem_free_object_unlocked = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- u32 contexts, context_base;
+ u32 contexts;
+ u64 context_base;
bool uevent;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077a95..08f9c6fa0f7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
ntfy->p->base.event = &ntfy->p->e.base;
ntfy->p->base.file_priv = f;
ntfy->p->base.pid = current->pid;
- ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b0057bf..7a7788212df7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1346,21 +1346,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static void
+static int
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = min_t(u32, start + size, 256);
u32 i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
}
nv50_crtc_lut_load(crtc);
+
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 6c0647486945..3b702230a88c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
- unsigned int i;
- int ret;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 530567cc25b7..983c8cf2441c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -122,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(omap_fb);
}
-static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = omap_framebuffer_create_handle,
.destroy = omap_framebuffer_destroy,
- .dirty = omap_framebuffer_dirty,
};
static uint32_t get_linear_addr(struct plane *plane,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385892..ad429683fef7 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
&norect, one_clip_rect, inc);
- drm_vblank_get(dev, qcrtc->index);
+ drm_crtc_vblank_get(crtc);
if (event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, qcrtc->index, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- drm_vblank_put(dev, qcrtc->index);
+ drm_crtc_vblank_put(crtc);
ret = qxl_bo_reserve(bo, false);
if (!ret) {
@@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
qxl_crtc->index = crtc_id;
- drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
return 0;
}
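
/*
 * The per-CRTC vblank event pattern the qxl hunk above converts to,
 * sketched with made-up names: take a vblank reference, send the event
 * under dev->event_lock, then drop the reference. The early-return on a
 * failed get is this sketch's addition, not qxl's.
 */
#include <drm/drmP.h>

static void example_flip_complete(struct drm_crtc *crtc,
				  struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	if (drm_crtc_vblank_get(crtc))
		return;		/* CRTC is off; no vblank to wait for */

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (event)
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_crtc_vblank_put(crtc);
}
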
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe50ba..460bbceae297 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object = qxl_gem_object_free,
+ .gem_free_object_unlocked = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f6320b8..df2657051afd 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- int bpp;
- int depth;
-
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %d "
+ FENCE_WARN(fence, "failed to wait on release %llu "
"after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
- unsigned fence_context;
+ u64 fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982647..e85c7a2f565b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = radeon_crtc->lut_b[regno] << 6;
}
-static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
}
radeon_crtc_load_lut(crtc);
+
+ return 0;
}
static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
- drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(&radeon_crtc->base);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
}
work->base = base;
- r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(&radeon_crtc->base);
pflip_cleanup:
if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
+
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set2 = radeon_crtc_cursor_set2,
.cursor_move = radeon_crtc_cursor_move,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa740171f..a455dc7d4aa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
#include "radeon_drv.h"
#include <drm/drm_pciids.h>
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
@@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret == -EPROBE_DEFER)
return ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* Get rid of things like offb */
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925a5b..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
+ struct drm_crtc *crtc;
int i, r;
/* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_unmap_vram_bos(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.active_crtcs & (1 << i)) {
/* This can fail if a modeset is in progress */
- if (drm_vblank_get(rdev->ddev, i) == 0)
+ if (drm_crtc_vblank_get(crtc) == 0)
rdev->pm.req_vblank |= (1 << i);
else
DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
i);
}
+ i++;
}
}
radeon_set_power_state(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
- drm_vblank_put(rdev->ddev, i);
+ drm_crtc_vblank_put(crtc);
}
+ i++;
}
}
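
/*
 * Unlike the atomic-state iterators elsewhere in this series,
 * drm_for_each_crtc() walks every CRTC registered on the device, which
 * is why radeon keeps a hand-rolled index to line up with its
 * active_crtcs bitmask. Hedged sketch:
 */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void example_get_active_vblanks(struct drm_device *ddev, u32 active)
{
	struct drm_crtc *crtc;
	int i = 0;

	drm_for_each_crtc(crtc, ddev) {
		if (active & (1 << i))
			drm_crtc_vblank_get(crtc);
		i++;
	}
}
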
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27883..48ec4b6e8b26 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d970..86c109b16876 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
{
struct rcar_du_device *rcdu = dev->dev_private;
struct rcar_du_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
unsigned int i;
int ret;
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
spin_lock(&rcdu->commit.wait.lock);
ret = wait_event_interruptible_locked(rcdu->commit.wait,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f78e1..bfe31ca870cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
bool needs_realloc = false;
unsigned int groups = 0;
unsigned int i;
+ struct drm_plane *drm_plane;
+ struct drm_plane_state *drm_plane_state;
/* Check if hardware planes need to be reallocated. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int index;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
/* Reallocate hardware planes for each plane that needs it. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f703cb..09a4d429c0f0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,6 +19,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -300,11 +301,6 @@ static const struct file_operations rockchip_drm_driver_fops = {
.release = drm_release,
};
-const struct vm_operations_struct rockchip_drm_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC,
@@ -315,8 +311,8 @@ static struct drm_driver rockchip_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
.disable_vblank = rockchip_drm_crtc_disable_vblank,
- .gem_vm_ops = &rockchip_drm_vm_ops,
- .gem_free_object = rockchip_gem_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.dumb_map_offset = rockchip_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1c4d5b5a70a2..5567fb43e674 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -889,7 +889,7 @@ static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
if (e && e->base.file_priv == file_priv) {
vop->event = NULL;
- e->base.destroy(&e->base);
+ kfree(&e->base);
file_priv->event_space += sizeof(e->event);
}
spin_unlock_irqrestore(&drm->event_lock, flags);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc779d5..6547b1db460a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
scrtc->event = NULL;
if (event) {
drm_crtc_send_vblank_event(&scrtc->crtc, event);
- drm_vblank_put(dev, 0);
+ drm_crtc_vblank_put(&scrtc->crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
if (event) {
event->pipe = 0;
- drm_vblank_get(dev, 0);
+ drm_crtc_vblank_get(&scrtc->crtc);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172079..ee79264b5b6a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -264,7 +264,7 @@ static struct drm_driver shmob_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e990299735c..53aa0029295b 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(CUR_AWE);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72294..b440617a7019 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct drm_device *dev = node->minor->dev;
struct drm_plane *p;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
list_for_each_entry(p, &dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
plane->fps_info.fips_str);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -310,7 +304,7 @@ static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.load = sti_load,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f76632002c..d439128e6309 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da197..fdf69b5a041b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct drm_crtc *crtc = drm_plane->crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
unsigned int b;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
for (b = 0; b < GDP_NODE_NB_BANK; b++) {
seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
gdp_node_dump_node(s, gdp->node_list[b].btm_field);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cdf09..9f49c00f1a02 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd5b9..85545ebf88d3 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -628,12 +628,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
DBGFS_DUMP("\n", HDMI_CFG);
@@ -690,7 +684,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1edec29b9e45..1c06a50fddca 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
int cmd, cmd_offset, infoxp70;
void *virt;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b51f7..6f86f2b2b6a5 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a59da..60fe0afa5644 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -515,13 +515,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
@@ -587,7 +581,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5ac6..0132aaebe598 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
DBGFS_DUMP(VID_CSAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 76e922bb60e5..68e9d85085fb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -103,7 +103,7 @@ static struct drm_driver sun4i_drv_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.dumb_destroy = drm_gem_dumb_destroy,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
/* PRIME Operations */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc903524d..308e197908fc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -549,7 +549,7 @@ static struct drm_driver tilcdc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = tilcdc_enable_vblank,
.disable_vblank = tilcdc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44995..f92ea9579674 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
if (event)
- drm_send_vblank_event(dev, 0, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf45f..59adcf8532dd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
- *
- * Note that this is called with the struct_mutex held.
*/
void vc4_free_object(struct drm_gem_object *gem_bo)
{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 904d0754ad78..4c0f26a644a3 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -175,20 +175,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
}
-static void
+static int
vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 i;
- for (i = start; i < start + size; i++) {
+ for (i = 0; i < size; i++) {
vc4_crtc->lut_r[i] = r[i] >> 8;
vc4_crtc->lut_g[i] = g[i] >> 8;
vc4_crtc->lut_b[i] = b[i] >> 8;
}
vc4_crtc_lut_load(crtc);
+
+ return 0;
}
static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +397,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
unsigned long flags;
+ const struct drm_plane_state *plane_state;
u32 dlist_count = 0;
int ret;
@@ -404,18 +407,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (hweight32(state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *plane_state =
- state->state->plane_states[drm_plane_index(plane)];
-
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!plane_state)
- plane_state = plane->state;
-
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
dlist_count += vc4_plane_dlist_size(plane_state);
- }
dlist_count++; /* Account for SCALER_CTL0_END. */
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 3446ece21b4a..58b8fc036332 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = {
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object = vc4_free_object,
+ .gem_free_object_unlocked = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 37cac59401d7..c799baabc008 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -469,7 +469,7 @@ int vc4_kms_load(struct drm_device *dev);
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
-u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 46899d6de675..6155e8aca1c6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
- mutex_lock(&dev->struct_mutex);
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_unreference(state->bo[i]);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
- /* Need the struct lock for drm_gem_object_unreference(). */
- mutex_lock(&dev->struct_mutex);
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_unreference(&exec->bo[i]->base);
+ drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
kfree(exec->bo);
}
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_unreference(&bo->base.base);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
}
- mutex_unlock(&dev->struct_mutex);
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index cb37751bc99f..39c0b2048bfd 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -111,6 +111,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
int i;
uint64_t wait_seqno = 0;
struct vc4_commit *c;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state;
c = commit_init(state);
if (!c)
@@ -130,13 +132,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
return ret;
}
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(state, plane, new_state, i) {
if ((plane->state->fb != new_state->fb) && new_state->fb) {
struct drm_gem_cma_object *cma_bo =
drm_fb_cma_get_gem_obj(new_state->fb, 0);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fde31..5d2c3d9fd17a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -690,9 +690,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
return vc4_state->dlist_count;
}
-u32 vc4_plane_dlist_size(struct drm_plane_state *state)
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ const struct vc4_plane_state *vc4_state =
+ container_of(state, typeof(*vc4_state), base);
return vc4_state->dlist_count;
}
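
/*
 * Why the hunk above swaps to_vc4_plane_state() for container_of(): a
 * plain cast-based helper silently discards the const qualifier on the
 * incoming state, while container_of() can keep it. A standalone model
 * with stand-in types (my_container_of is a simplified version of the
 * kernel macro):
 */
#include <stddef.h>

struct base { int x; };
struct derived { struct base base; unsigned int count; };

#define my_container_of(ptr, type, member) \
	((type *)((const char *)(ptr) - offsetof(type, member)))

static unsigned int get_count(const struct base *b)
{
	const struct derived *d =
		my_container_of(b, const struct derived, base);

	return d->count;
}
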
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3dde6..1b4cc8b27080 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = {
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM,
- .gem_free_object = vgem_gem_free_object,
+ .gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
.fops = &vgem_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88f44..d82ae1cddfcf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#define XRES_MIN 320
-#define YRES_MIN 200
+#define XRES_MIN 32
+#define YRES_MIN 32
#define XRES_DEF 1024
#define YRES_DEF 768
@@ -38,86 +38,6 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-static void
-virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_output *output)
-{
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = 0;
- virtio_gpu_cursor_ping(vgdev, output);
-}
-
-static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x, int32_t hot_y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
- struct virtio_gpu_fence *fence = NULL;
- int ret = 0;
-
- if (handle == 0) {
- virtio_gpu_hide_cursor(vgdev, output);
- return 0;
- }
-
- /* lookup the cursor */
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
-
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (!qobj->hw_res_handle) {
- ret = -EINVAL;
- goto out;
- }
-
- virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
- cpu_to_le32(64),
- cpu_to_le32(64),
- 0, 0, &fence);
- ret = virtio_gpu_object_reserve(qobj, false);
- if (!ret) {
- reservation_object_add_excl_fence(qobj->tbo.resv,
- &fence->f);
- fence_put(&fence->f);
- virtio_gpu_object_unreserve(qobj);
- virtio_gpu_object_wait(qobj, false);
- }
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
- output->cursor.hot_x = cpu_to_le32(hot_x);
- output->cursor.hot_y = cpu_to_le32(hot_y);
- virtio_gpu_cursor_ping(vgdev, output);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gobj);
- return ret;
-}
-
-static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
- output->cursor.pos.x = cpu_to_le32(x);
- output->cursor.pos.y = cpu_to_le32(y);
- virtio_gpu_cursor_ping(vgdev, output);
- return 0;
-}
-
static int virtio_gpu_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -156,7 +76,7 @@ static int virtio_gpu_page_flip(struct drm_crtc *crtc,
if (event) {
spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
- drm_send_vblank_event(crtc->dev, -1, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
}
@@ -164,8 +84,6 @@ static int virtio_gpu_page_flip(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
- .cursor_set2 = virtio_gpu_crtc_cursor_set,
- .cursor_move = virtio_gpu_crtc_cursor_move,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
@@ -406,7 +324,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_connector *connector = &output->conn;
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
- struct drm_plane *plane;
+ struct drm_plane *primary, *cursor;
output->index = index;
if (index == 0) {
@@ -415,13 +333,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
output->info.r.height = cpu_to_le32(YRES_DEF);
}
- plane = virtio_gpu_plane_init(vgdev, index);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+ cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor))
+ return PTR_ERR(cursor);
+ drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- plane->crtc = crtc;
+ primary->crtc = crtc;
+ cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -466,10 +388,30 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return &virtio_gpu_fb->base;
}
+static int vgdev_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ if (nonblock)
+ return -EBUSY;
+
+ drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_wait_for_fences(dev, state);
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_state_free(state);
+ return 0;
+}
+
static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.fb_create = virtio_gpu_user_framebuffer_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = vgdev_atomic_commit,
};
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77a35..5820b7020ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -143,7 +143,7 @@ static struct drm_driver driver = {
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object = virtio_gpu_gem_free_object,
+ .gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f846a..acf556a35cb2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -335,6 +336,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index);
/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a2345ab..925ca25209df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
DRM_FORMAT_ABGR8888,
};
+static const uint32_t virtio_gpu_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return 0;
}
-static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
+ struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
uint32_t handle;
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, handle, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- plane->state->crtc_x, plane->state->crtc_y, NULL);
+ cpu_to_le32(plane->state->src_w >> 16),
+ cpu_to_le32(plane->state->src_h >> 16),
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16, NULL);
}
} else {
handle = 0;
}
- DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle,
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
plane->state->crtc_w, plane->state->crtc_h,
- plane->state->crtc_x, plane->state->crtc_y);
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- plane->state->crtc_w,
- plane->state->crtc_h,
- plane->state->crtc_x,
- plane->state->crtc_y);
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->crtc_x,
- plane->state->crtc_y,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
}
+static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = NULL;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_fence *fence = NULL;
+ struct virtio_gpu_object *bo = NULL;
+ uint32_t handle;
+ int ret = 0;
-static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = {
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
+ if (plane->state->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ handle = bo->hw_res_handle;
+ } else {
+ handle = 0;
+ }
+
+ if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
+ /* new cursor -- update & wait */
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(plane->state->crtc_w),
+ cpu_to_le32(plane->state->crtc_h),
+ 0, 0, &fence);
+ ret = virtio_gpu_object_reserve(bo, false);
+ if (!ret) {
+ reservation_object_add_excl_fence(bo->tbo.resv,
+ &fence->f);
+ fence_put(&fence->f);
+ fence = NULL;
+ virtio_gpu_object_unreserve(bo);
+ virtio_gpu_object_wait(bo, false);
+ }
+ }
+
+ if (plane->state->fb != old_state->fb) {
+ DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->fb ? plane->state->fb->hot_x : 0,
+ plane->state->fb ? plane->state->fb->hot_y : 0);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+ output->cursor.resource_id = cpu_to_le32(handle);
+ if (plane->state->fb) {
+ output->cursor.hot_x =
+ cpu_to_le32(plane->state->fb->hot_x);
+ output->cursor.hot_y =
+ cpu_to_le32(plane->state->fb->hot_y);
+ } else {
+ output->cursor.hot_x = cpu_to_le32(0);
+ output->cursor.hot_y = cpu_to_le32(0);
+ }
+ } else {
+ DRM_DEBUG("move +%d+%d\n",
+ plane->state->crtc_x,
+ plane->state->crtc_y);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+ }
+ output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
+ output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
+ virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
+ .atomic_check = virtio_gpu_plane_atomic_check,
+ .atomic_update = virtio_gpu_primary_plane_update,
+};
+
+static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
.atomic_check = virtio_gpu_plane_atomic_check,
- .atomic_update = virtio_gpu_plane_atomic_update,
+ .atomic_update = virtio_gpu_cursor_plane_update,
};
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index)
{
struct drm_device *dev = vgdev->ddev;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
- int ret;
+ const uint32_t *formats;
+ int ret, nformats;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
+ if (type == DRM_PLANE_TYPE_CURSOR) {
+ formats = virtio_gpu_cursor_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
+ funcs = &virtio_gpu_cursor_helper_funcs;
+ } else {
+ formats = virtio_gpu_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_formats);
+ funcs = &virtio_gpu_primary_helper_funcs;
+ }
ret = drm_universal_plane_init(dev, plane, 1 << index,
&virtio_gpu_plane_funcs,
- virtio_gpu_formats,
- ARRAY_SIZE(virtio_gpu_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ formats, nformats,
+ type, NULL);
if (ret)
goto err_plane_init;
- drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs);
+ drm_plane_helper_add(plane, funcs);
return plane;
err_plane_init:
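The primary-plane hook above now programs the scanout from the src_* source rectangle instead of the crtc_* destination rectangle; drm_plane_state stores src_{x,y,w,h} in 16.16 fixed point, which is why every value is shifted right by 16. A minimal sketch of the conversion, as an illustrative helper that is not part of the patch:

    /* drm_plane_state.src_{x,y,w,h} are 16.16 fixed point: the upper 16
     * bits hold the integer part, the lower 16 bits the fraction. */
    static inline uint32_t fixed_16_16_to_int(uint32_t v)
    {
            return v >> 16;     /* truncate the fractional part */
    }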
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
- unsigned ctx;
+ u64 ctx;
};
struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 55231cce73a0..8a69d4da40b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
}
+
+ return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c501..ff4803c107bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
+ uint32_t size);
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
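With the new ->gamma_set prototype (no @start parameter, int return), atomic drivers can wire the generic helper straight into their CRTC funcs. A hedged sketch, with "mydrv" as an illustrative name:

    static const struct drm_crtc_funcs mydrv_crtc_funcs = {
            /* ... other hooks ... */
            .gamma_set = drm_atomic_helper_legacy_gamma_set,
    };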
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d926..2df216b39cc5 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) "vga_switcheroo: " fmt
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
*
* Register vga client (GPU). Enable vga_switcheroo if another GPU and a
* handler have already registered. The power state of the client is assumed
- * to be ON.
+ * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
+ * to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* @id: client identifier
*
* Register audio client (audio device on a GPU). The power state of the
- * client is assumed to be ON.
+ * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
+ * shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
}
/**
+ * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
+ * @pdev: client pci device
+ *
+ * Determine whether any prerequisites for probing a given client are not
+ * yet fulfilled. Drivers shall invoke this early on in their ->probe
+ * callback and return %-EPROBE_DEFER if it evaluates to %true. The client
+ * must not be registered before this function has been called.
+ *
+ * Return: %true if probing should be deferred, otherwise %false.
+ */
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ /*
+ * apple-gmux is needed on pre-retina MacBook Pro
+ * to probe the panel if pdev is the inactive GPU.
+ */
+ if (apple_gmux_present() && pdev != vga_default_device() &&
+ !vgasr_priv.handler_flags)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
+
+/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device
*
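A hedged sketch of the call sequence the new kernel-doc prescribes, with "mydrv" as an illustrative name:

    static int mydrv_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
    {
            if (vga_switcheroo_client_probe_defer(pdev))
                    return -EPROBE_DEFER;

            /* ... initialize the device, then register the client ... */
            return 0;
    }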
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
goto free_uar;
}
- uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+ uar->index << PAGE_SHIFT,
+ PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
goto unamp_uar;
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index b56885c14839..ebb34dca60df 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -68,7 +68,8 @@ struct sync_timeline {
/* protected by child_list_lock */
bool destroyed;
- int context, value;
+ u64 context;
+ int value;
struct list_head child_list_head;
spinlock_t child_list_lock;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 84f1a8eefbdb..9e5eefd6f733 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -52,10 +52,12 @@
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
+#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -282,12 +284,12 @@ struct drm_ioctl_desc {
/* Event queued up for userspace to read */
struct drm_pending_event {
struct drm_event *event;
+ struct fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
- void (*destroy)(struct drm_pending_event *event);
};
/* initial implementation using a linked list - todo hashtab */
@@ -392,11 +394,6 @@ struct drm_master {
void *driver_priv;
};
-/* Size of ringbuffer for vblank timestamps. Just double-buffer
- * in initial implementation.
- */
-#define DRM_VBLANKTIME_RBSIZE 2
-
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -434,7 +431,7 @@ struct drm_driver {
*
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
- * return the value of drm_vblank_count. The DRM core will account for
+ * use the drm_vblank_no_hw_counter() function. The DRM core will account for
* missed vblank events while interrupts were disabled based on system
* timestamps.
*
@@ -452,8 +449,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
+ * a hardware vblank counter, the driver should use the
+ * drm_vblank_no_hw_counter() function that keeps a virtual counter.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +464,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
+ * a hardware vblank counter, the driver should use the
+ * drm_vblank_no_hw_counter() function that keeps a virtual counter.
*/
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
@@ -725,10 +722,10 @@ struct drm_vblank_crtc {
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timer_list disable_timer; /* delayed disable timer */
- /* vblank counter, protected by dev->vblank_time_lock for writes */
- u32 count;
- /* vblank timestamps, protected by dev->vblank_time_lock for writes */
- struct timeval time[DRM_VBLANKTIME_RBSIZE];
+ seqlock_t seqlock; /* protects vblank count and time */
+
+ u32 count; /* vblank counter */
+ struct timeval time; /* vblank timestamp */
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
@@ -994,6 +991,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
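The timestamp ringbuffer is gone: a single count/time pair is now published under @seqlock, and readers retry instead of racing with the interrupt handler (the real read side lives in drm_irq.c). A hedged sketch of the reader pattern:

    static u32 read_vblank(struct drm_vblank_crtc *vblank, struct timeval *t)
    {
            unsigned int seq;
            u32 count;

            do {
                    seq = read_seqbegin(&vblank->seqlock);
                    count = vblank->count;
                    *t = vblank->time;
            } while (read_seqretry(&vblank->seqlock, seq));

            return count;
    }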
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 92c84e9ab09a..d12cfb9c6062 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -71,7 +71,7 @@ static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- return state->crtc_states[drm_crtc_index(crtc)];
+ return state->crtcs[drm_crtc_index(crtc)].state;
}
/**
@@ -86,7 +86,7 @@ static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
- return state->plane_states[drm_plane_index(plane)];
+ return state->planes[drm_plane_index(plane)].state;
}
/**
@@ -106,7 +106,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
if (index >= state->num_connector)
return NULL;
- return state->connector_states[index];
+ return state->connectors[index].state;
+}
+
+/**
+ * __drm_atomic_get_current_plane_state - get current plane state
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the plane state for the given plane, either from
+ * @state, or if the plane isn't part of the atomic state update, from @plane.
+ * This is useful in atomic check callbacks, when drivers need to peek at, but
+ * not change, state of other planes, since it avoids threading an error code
+ * back up the call chain.
+ *
+ * WARNING:
+ *
+ * Note that this function is in general unsafe since it doesn't check for the
+ * required locking for accessing state structures. Drivers must ensure that it is
+ * safe to access the returned state structure through other means. One common
+ * example is when planes are fixed to a single CRTC, and the driver knows that
+ * the CRTC lock is held already. In that case holding the CRTC lock gives a
+ * read-lock on all planes connected to that CRTC. But if planes can be
+ * reassigned things get more tricky. In that case it's better to use
+ * drm_atomic_get_plane_state and wire up full error handling.
+ *
+ * Returns:
+ *
+ * Read-only pointer to the current plane state.
+ */
+static inline const struct drm_plane_state *
+__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ if (state->planes[drm_plane_index(plane)].state)
+ return state->planes[drm_plane_index(plane)].state;
+
+ return plane->state;
}
int __must_check
@@ -139,27 +175,27 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
-#define for_each_connector_in_state(state, connector, connector_state, __i) \
+#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->num_connector && \
- ((connector) = (state)->connectors[__i], \
- (connector_state) = (state)->connector_states[__i], 1); \
+ (__i) < (__state)->num_connector && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (connector_state) = (__state)->connectors[__i].state, 1); \
(__i)++) \
for_each_if (connector)
-#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->dev->mode_config.num_crtc && \
- ((crtc) = (state)->crtcs[__i], \
- (crtc_state) = (state)->crtc_states[__i], 1); \
+ (__i) < (__state)->dev->mode_config.num_crtc && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (crtc_state) = (__state)->crtcs[__i].state, 1); \
(__i)++) \
for_each_if (crtc_state)
-#define for_each_plane_in_state(state, plane, plane_state, __i) \
+#define for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->dev->mode_config.num_total_plane && \
- ((plane) = (state)->planes[__i], \
- (plane_state) = (state)->plane_states[__i], 1); \
+ (__i) < (__state)->dev->mode_config.num_total_plane && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (plane_state) = (__state)->planes[__i].state, 1); \
(__i)++) \
for_each_if (plane_state)
static inline bool
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d473dcc91f54..1877a7c18d8e 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -147,9 +147,9 @@ void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size);
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -159,7 +159,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
* planes which *will* be attached (for ->atomic_check()) see
- * drm_crtc_for_each_pending_plane()
+ * drm_crtc_for_each_pending_plane().
*/
#define drm_atomic_crtc_for_each_plane(plane, crtc) \
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -171,11 +171,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during (for example)
- * ->atomic_check() operations, to validate the incoming state
+ * ->atomic_check() operations, to validate the incoming state.
*/
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+/**
+ * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @plane_state: loop cursor for the plane's state, must be const
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied. Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state.
+ *
+ * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
+ * const plane_state. This is useful when a driver just wants to peek at other
+ * active planes on this crtc, but does not need to change them.
+ */
+#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
+ for_each_if ((plane_state = \
+ __drm_atomic_get_current_plane_state((crtc_state)->state, \
+ plane)))
+
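A hedged usage sketch of the new iterator inside a CRTC ->atomic_check, with illustrative names; note that plane_state must be declared const:

    static int mydrv_crtc_atomic_check(struct drm_crtc *crtc,
                                       struct drm_crtc_state *crtc_state)
    {
            struct drm_plane *plane;
            const struct drm_plane_state *plane_state;

            drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
                                                       crtc_state) {
                    /* peek only: e.g. reject negative positions */
                    if (plane_state->crtc_x < 0 || plane_state->crtc_y < 0)
                            return -EINVAL;
            }

            return 0;
    }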
/*
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @plane: plane object
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 36d3bbf9ea75..1a8d66ca677c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -253,6 +253,8 @@ struct drm_framebuffer {
int bits_per_pixel;
int flags;
uint32_t pixel_format; /* fourcc format */
+ int hot_x;
+ int hot_y;
struct list_head filp_head;
};
@@ -314,6 +316,7 @@ struct drm_plane_helper_funcs;
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
+ * @mode_blob: &drm_property_blob for @mode
* @degamma_lut: Lookup table for converting framebuffer pixel data
* before applying the conversion matrix
* @ctm: Transformation matrix
@@ -478,8 +481,8 @@ struct drm_crtc_funcs {
* going on, which should eventually be unified to just one set of
* hooks.
*/
- void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
+ int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t size);
/**
* @destroy:
@@ -708,6 +711,7 @@ struct drm_crtc_funcs {
* @dev: parent DRM device
* @port: OF node used by drm_of_find_possible_crtcs()
* @head: list management
+ * @name: human readable name, can be overwritten by the driver
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
@@ -738,12 +742,13 @@ struct drm_crtc {
char *name;
- /*
- * crtc mutex
+ /**
+ * @mutex:
*
* This provides a read lock for the overall crtc state (mode, dpms
* state, ...) and a write lock for everything which can be updated
- * without a full modeset (fb, cursor data, ...)
+ * without a full modeset (fb, cursor data, crtc properties ...). A full
+ * modeset also needs to grab dev->mode_config.connection_mutex.
*/
struct drm_modeset_lock mutex;
@@ -753,6 +758,9 @@ struct drm_crtc {
struct drm_plane *primary;
struct drm_plane *cursor;
+ /* position inside the mode_config.list, can be used as a [] idx */
+ unsigned index;
+
/* position of cursor plane on crtc */
int cursor_x;
int cursor_y;
@@ -1078,7 +1086,7 @@ struct drm_encoder_funcs {
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
- * @name: encoder name
+ * @name: human readable name, can be overwritten by the driver
* @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
* @possible_crtcs: bitmask of potential CRTC bindings
* @possible_clones: bitmask of potential sibling encoders for cloning
@@ -1097,6 +1105,10 @@ struct drm_encoder {
struct drm_mode_object base;
char *name;
int encoder_type;
+
+ /* position inside the mode_config.list, can be used as a [] idx */
+ unsigned index;
+
uint32_t possible_crtcs;
uint32_t possible_clones;
@@ -1124,7 +1136,8 @@ struct drm_encoder {
* @attr: sysfs attributes
* @head: list management
* @base: base KMS object
- * @name: connector name
+ * @name: human readable name, can be overwritten by the driver
+ * @connector_id: compacted connector id, useful for indexing arrays
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
@@ -1137,7 +1150,6 @@ struct drm_encoder {
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
- * @path_blob_ptr: DRM blob property data for the DP MST path property
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
@@ -1200,8 +1212,23 @@ struct drm_connector {
struct drm_property_blob *edid_blob_ptr;
struct drm_object_properties properties;
+ /**
+ * @path_blob_ptr:
+ *
+ * DRM blob property data for the DP MST path property.
+ */
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try not to expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ */
struct drm_property_blob *tile_blob_ptr;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -1263,6 +1290,7 @@ struct drm_connector {
* plane (in 16.16)
* @src_w: width of visible portion of plane (in 16.16)
* @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
@@ -1503,6 +1531,7 @@ enum drm_plane_type {
* struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
* @head: for list management
+ * @name: human readable name, can be overwritten by the driver
* @base: base mode object
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
@@ -1516,6 +1545,7 @@ enum drm_plane_type {
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
+ * @helper_private: mid-layer private data
*/
struct drm_plane {
struct drm_device *dev;
@@ -1523,6 +1553,13 @@ struct drm_plane {
char *name;
+ /**
+ * @mutex:
+ *
+ * Protects modeset plane state, together with the mutex of &drm_crtc
+ * this plane is linked to (when active, getting activated or getting
+ * disabled).
+ */
struct drm_modeset_lock mutex;
struct drm_mode_object base;
@@ -1543,6 +1580,9 @@ struct drm_plane {
enum drm_plane_type type;
+ /* position inside the mode_config.list, can be used as a [] idx */
+ unsigned index;
+
const struct drm_plane_helper_funcs *helper_private;
struct drm_plane_state *state;
@@ -1693,19 +1733,31 @@ struct drm_bridge {
void *driver_private;
};
+struct __drm_planes_state {
+ struct drm_plane *ptr;
+ struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+ struct drm_crtc *ptr;
+ struct drm_crtc_state *state;
+};
+
+struct __drm_connectors_state {
+ struct drm_connector *ptr;
+ struct drm_connector_state *state;
+};
+
/**
* struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of plane pointers
- * @plane_states: pointer to array of plane states pointers
+ * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
+ * @crtcs: pointer to array of structures with per-CRTC data
- * @crtc_states: pointer to array of CRTC states pointers
- * @num_connector: size of the @connectors and @connector_states arrays
+ * @num_connector: size of the @connectors array
- * @connectors: pointer to array of connector pointers
- * @connector_states: pointer to array of connector states pointers
+ * @connectors: pointer to array of structures with per-connector data
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
@@ -1713,13 +1765,10 @@ struct drm_atomic_state {
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool legacy_set_config : 1;
- struct drm_plane **planes;
- struct drm_plane_state **plane_states;
- struct drm_crtc **crtcs;
- struct drm_crtc_state **crtc_states;
+ struct __drm_planes_state *planes;
+ struct __drm_crtcs_state *crtcs;
int num_connector;
- struct drm_connector **connectors;
- struct drm_connector_state **connector_states;
+ struct __drm_connectors_state *connectors;
struct drm_modeset_acquire_ctx *acquire_ctx;
};
@@ -2022,8 +2071,6 @@ struct drm_mode_config_funcs {
* @connection_mutex: ww mutex protecting connector state and routing
* @acquire_ctx: global implicit acquire context used by atomic drivers for
* legacy IOCTLs
- * @idr_mutex: mutex for KMS ID allocation and management
- * @crtc_idr: main KMS ID tracking object
* @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
@@ -2045,6 +2092,7 @@ struct drm_mode_config_funcs {
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
* @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent delivery for this device
* @output_poll_work: delayed work for polling in process context
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
@@ -2072,10 +2120,30 @@ struct drm_mode_config {
struct mutex mutex; /* protects configuration (mode lists etc.) */
struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
- struct mutex idr_mutex; /* for IDR management */
- struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
- struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
- /* this is limited to one for now */
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @crtc_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr crtc_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, as used in some
+ * high-res DP MST screens.
+ */
+ struct idr tile_idr;
struct mutex fb_lock; /* protects global and per-file fb lists */
int num_fb;
@@ -2177,7 +2245,11 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
- /* whether the driver supports fb modifiers */
+ /**
+ * @allow_fb_modifiers:
+ *
+ * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ */
bool allow_fb_modifiers;
/* cursor size */
@@ -2230,7 +2302,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
-extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+ return crtc->index;
+}
/**
* drm_crtc_mask - find the mask of a registered CRTC
@@ -2284,7 +2367,18 @@ int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
-extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+ return encoder->index;
+}
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2315,7 +2409,18 @@ extern int drm_plane_init(struct drm_device *dev,
const uint32_t *formats, unsigned int format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
-extern unsigned int drm_plane_index(struct drm_plane *plane);
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+ return plane->index;
+}
extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
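Since the index is now cached in the object instead of being computed by walking the mode_config lists, drm_crtc_index(), drm_encoder_index() and drm_plane_index() are O(1) and usable in hot paths, so driver-private per-object state becomes a plain array lookup. A hedged sketch with illustrative names:

    struct mydrv_crtc_data {
            int foo;                /* driver-private per-CRTC state */
    };

    struct mydrv_device {
            struct mydrv_crtc_data crtc_data[4];    /* one slot per CRTC */
    };

    static struct mydrv_crtc_data *
    mydrv_get_crtc_data(struct mydrv_device *md, struct drm_crtc *crtc)
    {
            return &md->crtc_data[drm_crtc_index(crtc)];
    }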
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9d03f167007b..5a848e734422 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
+#define EDP_DISPLAY_CTL_CAP_SIZE 3
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 7a9840f8b38e..ec552854a8f8 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -263,6 +263,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
+int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 625966a906f2..ff481770d76b 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -169,6 +169,8 @@ enum drm_mode_status {
*
* The horizontal and vertical timings are defined per the following diagram.
*
+ * ::
+ *
*
* Active Front Sync Back
* Region Porch Porch
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d4619dc2eecb..4e7a53b12632 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
* inspect dynamic configuration state should instead use
* @atomic_best_encoder.
*
+ * You can leave this function set to NULL if the connector is only
+ * attached to a single encoder and you are using the atomic helpers.
+ * In this case, the core will call drm_atomic_helper_best_encoder()
+ * for you.
+ *
* RETURNS:
*
* Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
* need to select the best encoder depending upon the desired
* configuration and can't select it statically.
*
- * This function is used by drm_atomic_helper_check_modeset() and either
- * this or @best_encoder is required.
+ * This function is used by drm_atomic_helper_check_modeset().
+ * If it is not implemented, the core will fall back to @best_encoder
+ * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
*
* NOTE:
*
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 595f85c392ac..b1755f8db36b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
+#define INTEL_BSM 0x5c
+#define INTEL_BSM_MASK (0xFFFF << 20)
+
#endif /* _I915_DRM_H_ */
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644
index 000000000000..86baaa45567c
--- /dev/null
+++ b/include/linux/fence-array.h
@@ -0,0 +1,73 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_ARRAY_H
+#define __LINUX_FENCE_ARRAY_H
+
+#include <linux/fence.h>
+
+/**
+ * struct fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct fence_array_cb {
+ struct fence_cb cb;
+ struct fence_array *array;
+};
+
+/**
+ * struct fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct fence_array {
+ struct fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct fence **fences;
+};
+
+extern const struct fence_ops fence_array_ops;
+
+/**
+ * to_fence_array - cast a fence to a fence_array
+ * @fence: fence to cast to a fence_array
+ *
+ * Returns NULL if the fence is not a fence_array,
+ * or the fence_array otherwise.
+ */
+static inline struct fence_array *to_fence_array(struct fence *fence)
+{
+ if (fence->ops != &fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct fence_array, base);
+}
+
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_FENCE_ARRAY_H */
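A hedged usage sketch for the new header: combine two fences into one that signals once both have signaled (signal_on_any == false). This assumes the fence-array takes ownership of the fences pointer array on success (its release callback frees it), and that <linux/slab.h> is available for kmalloc_array()/kfree():

    static struct fence *combine(struct fence *a, struct fence *b)
    {
            struct fence **fences;
            struct fence_array *array;

            fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return NULL;
            fences[0] = a;
            fences[1] = b;

            array = fence_array_create(2, fences, fence_context_alloc(1),
                                       1, false);
            if (!array) {
                    kfree(fences);
                    return NULL;
            }

            return &array->base;
    }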
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2056e9fd0138..44d945e96473 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -77,7 +77,8 @@ struct fence {
struct rcu_head rcu;
struct list_head cb_list;
spinlock_t *lock;
- unsigned context, seqno;
+ u64 context;
+ unsigned seqno;
unsigned long flags;
ktime_t timestamp;
int status;
@@ -180,7 +181,7 @@ struct fence_ops {
};
void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno);
+ spinlock_t *lock, u64 context, unsigned seqno);
void fence_release(struct kref *kref);
void fence_free(struct fence *fence);
@@ -354,27 +355,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
#define FENCE_TRACE(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
if (config_enabled(CONFIG_FENCE_TRACE)) \
- pr_info("f %u#%u: " fmt, \
+ pr_info("f %llu#%u: " fmt, \
__ff->context, __ff->seqno, ##args); \
} while (0)
#define FENCE_WARN(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
#define FENCE_ERR(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b68c5..645ad06b5d52 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
}
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
resource_size_t phys_addr;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return ioremap_wc(phys_addr, PAGE_SIZE);
+ return ioremap_wc(phys_addr, size);
}
static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
/* Non-atomic map/unmap */
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
return ((char __force __iomem *) mapping) + offset;
}
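Callers of io_mapping_map_wc() now pass the mapping size explicitly instead of getting an implied single page (the mlx4 hunk above shows the mechanical update). A minimal hedged sketch, with illustrative names:

    static int mydrv_poke_doorbell(struct io_mapping *mapping,
                                   unsigned long offset)
    {
            void __iomem *db = io_mapping_map_wc(mapping, offset, PAGE_SIZE);

            if (!db)
                    return -ENOMEM;
            /* ... write-combined MMIO access ... */
            io_mapping_unmap(db);
            return 0;
    }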
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b39a5f3153bd..960bedbdec87 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}